/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* Maximum valid 802.1Q VLAN ID (12-bit field) */
#define ICE_MAX_VLAN_ID 0xFFF
/* IPv4 protocol number for GRE, used by NVGRE tunnels */
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F
/* PPP protocol ID for IPv6 carried in a PPPoE session */
#define ICE_PPP_IPV6_PROTO_ID 0x0057
/* Ethertype for IPv6 */
#define ICE_IPV6_ETHER_ID 0x86DD
/* IP protocol number for TCP */
#define ICE_TCP_PROTO_ID 0x06
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
541 { ICE_IPV4_OFOS, 14 },
545 { ICE_PROTOCOL_LAST, 0 },
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
554 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 0x00, 0x00, 0x40, 0x00,
556 0x40, 0x11, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 0x00, 0x00, 0x00, 0x00,
563 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x85,
567 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 0x00, 0x00, 0x00, 0x00,
570 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 0x00, 0x00, 0x40, 0x00,
572 0x40, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
581 { ICE_IPV4_OFOS, 14 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 0x00, 0x00, 0x40, 0x00,
596 0x40, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 0x00, 0x00, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 0x00, 0x00, 0x00, 0x00,
610 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 0x00, 0x00, 0x3b, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
627 { ICE_IPV6_OFOS, 14 },
631 { ICE_PROTOCOL_LAST, 0 },
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
640 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 0x00, 0x00, 0x00, 0x00,
654 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x85,
658 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 0x00, 0x00, 0x00, 0x00,
661 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 0x00, 0x00, 0x40, 0x00,
663 0x40, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
677 { ICE_PROTOCOL_LAST, 0 },
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 0x00, 0x00, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708 0x00, 0x00, 0x3b, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
724 { ICE_IPV4_OFOS, 14 },
726 { ICE_GTP_NO_PAY, 42 },
727 { ICE_PROTOCOL_LAST, 0 },
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
733 { ICE_IPV6_OFOS, 14 },
735 { ICE_GTP_NO_PAY, 62 },
736 { ICE_PROTOCOL_LAST, 0 },
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
741 { ICE_ETYPE_OL, 12 },
742 { ICE_VLAN_OFOS, 14},
744 { ICE_PROTOCOL_LAST, 0 },
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
749 { ICE_ETYPE_OL, 12 },
750 { ICE_VLAN_OFOS, 14},
752 { ICE_IPV4_OFOS, 26 },
753 { ICE_PROTOCOL_LAST, 0 },
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, 0x00, 0x00,
761 0x81, 0x00, /* ICE_ETYPE_OL 12 */
763 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
765 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
768 0x00, 0x21, /* PPP Link Layer 24 */
770 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
782 { ICE_ETYPE_OL, 12 },
783 { ICE_VLAN_OFOS, 14},
785 { ICE_IPV4_OFOS, 26 },
787 { ICE_PROTOCOL_LAST, 0 },
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
795 0x81, 0x00, /* ICE_ETYPE_OL 12 */
797 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
799 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
802 0x00, 0x21, /* PPP Link Layer 24 */
804 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 0x00, 0x01, 0x00, 0x00,
806 0x00, 0x06, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x50, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
822 { ICE_ETYPE_OL, 12 },
823 { ICE_VLAN_OFOS, 14},
825 { ICE_IPV4_OFOS, 26 },
826 { ICE_UDP_ILOS, 46 },
827 { ICE_PROTOCOL_LAST, 0 },
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 0x00, 0x00, 0x00, 0x00,
833 0x00, 0x00, 0x00, 0x00,
835 0x81, 0x00, /* ICE_ETYPE_OL 12 */
837 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
839 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
842 0x00, 0x21, /* PPP Link Layer 24 */
844 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 0x00, 0x01, 0x00, 0x00,
846 0x00, 0x11, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 0x00, 0x08, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
858 { ICE_ETYPE_OL, 12 },
859 { ICE_VLAN_OFOS, 14},
861 { ICE_IPV6_OFOS, 26 },
862 { ICE_PROTOCOL_LAST, 0 },
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
870 0x81, 0x00, /* ICE_ETYPE_OL 12 */
872 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
874 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
877 0x00, 0x57, /* PPP Link Layer 24 */
879 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 0x00, 0x00, 0x3b, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
896 { ICE_ETYPE_OL, 12 },
897 { ICE_VLAN_OFOS, 14},
899 { ICE_IPV6_OFOS, 26 },
901 { ICE_PROTOCOL_LAST, 0 },
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x81, 0x00, /* ICE_ETYPE_OL 12 */
911 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
913 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
916 0x00, 0x57, /* PPP Link Layer 24 */
918 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
929 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x50, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
941 { ICE_ETYPE_OL, 12 },
942 { ICE_VLAN_OFOS, 14},
944 { ICE_IPV6_OFOS, 26 },
945 { ICE_UDP_ILOS, 66 },
946 { ICE_PROTOCOL_LAST, 0 },
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
954 0x81, 0x00, /* ICE_ETYPE_OL 12 */
956 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
958 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
961 0x00, 0x57, /* PPP Link Layer 24 */
963 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 0x00, 0x08, 0x00, 0x00,
977 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
982 { ICE_IPV4_OFOS, 14 },
984 { ICE_PROTOCOL_LAST, 0 },
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
993 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 0x00, 0x00, 0x40, 0x00,
995 0x40, 0x32, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 { ICE_MAC_OFOS, 0 },
1006 { ICE_IPV6_OFOS, 14 },
1008 { ICE_PROTOCOL_LAST, 0 },
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV4_OFOS, 14 },
1037 { ICE_PROTOCOL_LAST, 0 },
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1046 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 0x00, 0x00, 0x40, 0x00,
1048 0x40, 0x33, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 { ICE_MAC_OFOS, 0 },
1060 { ICE_IPV6_OFOS, 14 },
1062 { ICE_PROTOCOL_LAST, 0 },
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1071 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x00,
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 0x00, 0x00, 0x00, 0x00,
1084 0x00, 0x00, 0x00, 0x00,
1085 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 { ICE_MAC_OFOS, 0 },
1090 { ICE_IPV4_OFOS, 14 },
1091 { ICE_UDP_ILOS, 34 },
1093 { ICE_PROTOCOL_LAST, 0 },
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1102 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 0x00, 0x00, 0x40, 0x00,
1104 0x40, 0x11, 0x00, 0x00,
1105 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 { ICE_MAC_OFOS, 0 },
1118 { ICE_IPV6_OFOS, 14 },
1119 { ICE_UDP_ILOS, 54 },
1121 { ICE_PROTOCOL_LAST, 0 },
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 0x00, 0x00, 0x00, 0x00,
1127 0x00, 0x00, 0x00, 0x00,
1130 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 0x00, 0x00, 0x00, 0x00,
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x00, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_PROTOCOL_LAST, 0 },
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1163 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 0x00, 0x00, 0x40, 0x00,
1165 0x40, 0x73, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00,
1172 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 { ICE_MAC_OFOS, 0 },
1177 { ICE_IPV6_OFOS, 14 },
1179 { ICE_PROTOCOL_LAST, 0 },
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 0x00, 0x00, 0x00, 0x00,
1185 0x00, 0x00, 0x00, 0x00,
1188 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 0x00, 0x0c, 0x73, 0x40,
1190 0x00, 0x00, 0x00, 0x00,
1191 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1196 0x00, 0x00, 0x00, 0x00,
1197 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1205 /* this is a recipe to profile association bitmap */
1206 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1207 ICE_MAX_NUM_PROFILES);
1209 /* this is a profile to recipe association bitmap */
1210 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1211 ICE_MAX_NUM_RECIPES);
1213 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1216 * ice_collect_result_idx - copy result index values
1217 * @buf: buffer that contains the result index
1218 * @recp: the recipe struct to copy data into
1220 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1221 struct ice_sw_recipe *recp)
1223 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1224 ice_set_bit(buf->content.result_indx &
1225 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1229 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1230 * @rid: recipe ID that we are populating
 *
 * Walks every HW profile mapped to @rid (via the recipe_to_profile bitmap)
 * and classifies the recipe's tunnel type: VXLAN, NVGRE (GRE), PPPoE, GTP,
 * plain non-tunneled traffic, or a combination of tunneled and non-tunneled.
 * Returns the derived ice_sw_tunnel_type.
1232 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
/* Hard-coded HW profile-ID groups used to classify each mapped profile.
 * NOTE(review): these magic profile IDs come from the profile table layout
 * of the device package — confirm against the loaded DDP package.
 */
1234 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1235 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1236 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1237 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1238 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1239 u16 i, j, profile_num = 0;
1240 bool non_tun_valid = false;
1241 bool pppoe_valid = false;
1242 bool vxlan_valid = false;
1243 bool gre_valid = false;
1244 bool gtp_valid = false;
1245 bool flag_valid = false;
/* Scan all profiles associated with this recipe and set a validity flag
 * for every tunnel class that at least one mapped profile belongs to.
 */
1247 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1248 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1253 for (i = 0; i < 12; i++) {
1254 if (gre_profile[i] == j)
1258 for (i = 0; i < 12; i++) {
1259 if (vxlan_profile[i] == j)
1263 for (i = 0; i < 7; i++) {
1264 if (pppoe_profile[i] == j)
1268 for (i = 0; i < 6; i++) {
1269 if (non_tun_profile[i] == j)
1270 non_tun_valid = true;
/* GTP-U profiles occupy a contiguous profile-ID range */
1273 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1274 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
/* ESP/AH/NAT-T/PFCP and GTP-C/GTP-U TEID profiles mark a "flag" recipe */
1277 if ((j >= ICE_PROFID_IPV4_ESP &&
1278 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1279 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1280 j <= ICE_PROFID_IPV6_GTPU_TEID))
/* Resolve the collected flags into a single tunnel type; a mix of
 * tunneled and non-tunneled profiles yields ICE_SW_TUN_AND_NON_TUN.
 */
1284 if (!non_tun_valid && vxlan_valid)
1285 tun_type = ICE_SW_TUN_VXLAN;
1286 else if (!non_tun_valid && gre_valid)
1287 tun_type = ICE_SW_TUN_NVGRE;
1288 else if (!non_tun_valid && pppoe_valid)
1289 tun_type = ICE_SW_TUN_PPPOE;
1290 else if (!non_tun_valid && gtp_valid)
1291 tun_type = ICE_SW_TUN_GTP;
1292 else if (non_tun_valid &&
1293 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1294 tun_type = ICE_SW_TUN_AND_NON_TUN;
1295 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1297 tun_type = ICE_NON_TUN;
1299 tun_type = ICE_NON_TUN;
/* Multi-profile PPPoE recipes: narrow to IPv4- or IPv6-only PPPoE when
 * exactly one of the two "other" profiles is mapped.
 */
1301 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1302 i = ice_is_bit_set(recipe_to_profile[rid],
1303 ICE_PROFID_PPPOE_IPV4_OTHER);
1304 j = ice_is_bit_set(recipe_to_profile[rid],
1305 ICE_PROFID_PPPOE_IPV6_OTHER);
1307 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1309 tun_type = ICE_SW_TUN_PPPOE_IPV6;
/* Refine a generic GTP classification to the exact inner/outer IP pair */
1312 if (tun_type == ICE_SW_TUN_GTP) {
1313 if (ice_is_bit_set(recipe_to_profile[rid],
1314 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1315 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1316 else if (ice_is_bit_set(recipe_to_profile[rid],
1317 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1318 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1319 else if (ice_is_bit_set(recipe_to_profile[rid],
1320 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1321 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1322 else if (ice_is_bit_set(recipe_to_profile[rid],
1323 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1324 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
/* Single-profile recipes map 1:1 onto a specific tunnel type */
1327 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1328 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1329 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1331 case ICE_PROFID_IPV4_TCP:
1332 tun_type = ICE_SW_IPV4_TCP;
1334 case ICE_PROFID_IPV4_UDP:
1335 tun_type = ICE_SW_IPV4_UDP;
1337 case ICE_PROFID_IPV6_TCP:
1338 tun_type = ICE_SW_IPV6_TCP;
1340 case ICE_PROFID_IPV6_UDP:
1341 tun_type = ICE_SW_IPV6_UDP;
1343 case ICE_PROFID_PPPOE_PAY:
1344 tun_type = ICE_SW_TUN_PPPOE_PAY;
1346 case ICE_PROFID_PPPOE_IPV4_TCP:
1347 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1349 case ICE_PROFID_PPPOE_IPV4_UDP:
1350 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1352 case ICE_PROFID_PPPOE_IPV4_OTHER:
1353 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1355 case ICE_PROFID_PPPOE_IPV6_TCP:
1356 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1358 case ICE_PROFID_PPPOE_IPV6_UDP:
1359 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1361 case ICE_PROFID_PPPOE_IPV6_OTHER:
1362 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1364 case ICE_PROFID_IPV4_ESP:
1365 tun_type = ICE_SW_TUN_IPV4_ESP;
1367 case ICE_PROFID_IPV6_ESP:
1368 tun_type = ICE_SW_TUN_IPV6_ESP;
1370 case ICE_PROFID_IPV4_AH:
1371 tun_type = ICE_SW_TUN_IPV4_AH;
1373 case ICE_PROFID_IPV6_AH:
1374 tun_type = ICE_SW_TUN_IPV6_AH;
1376 case ICE_PROFID_IPV4_NAT_T:
1377 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1379 case ICE_PROFID_IPV6_NAT_T:
1380 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1382 case ICE_PROFID_IPV4_PFCP_NODE:
1384 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1386 case ICE_PROFID_IPV6_PFCP_NODE:
1388 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1390 case ICE_PROFID_IPV4_PFCP_SESSION:
1392 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1394 case ICE_PROFID_IPV6_PFCP_SESSION:
1396 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1398 case ICE_PROFID_MAC_IPV4_L2TPV3:
1399 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1401 case ICE_PROFID_MAC_IPV6_L2TPV3:
1402 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1404 case ICE_PROFID_IPV4_GTPU_TEID:
1405 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1407 case ICE_PROFID_IPV6_GTPU_TEID:
1408 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1423 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1424 * @hw: pointer to hardware structure
1425 * @recps: struct that we need to populate
1426 * @rid: recipe ID that we are populating
1427 * @refresh_required: true if we should get recipe to profile mapping from FW
1429 * This function is used to populate all the necessary entries into our
1430 * bookkeeping so that we have a current list of all the recipes that are
1431 * programmed in the firmware.
 *
 * Returns ICE_SUCCESS on success, ICE_ERR_NO_MEMORY on allocation failure,
 * or the status of the underlying admin-queue query.
1433 static enum ice_status
1434 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1435 bool *refresh_required)
1437 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1438 struct ice_aqc_recipe_data_elem *tmp;
1439 u16 num_recps = ICE_MAX_NUM_RECIPES;
1440 struct ice_prot_lkup_ext *lkup_exts;
1441 enum ice_status status;
1445 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1447 /* we need a buffer big enough to accommodate all the recipes */
1448 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1449 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1451 return ICE_ERR_NO_MEMORY;
/* ice_aq_get_recipe returns the root recipe and all its sub-recipes */
1453 tmp[0].recipe_indx = rid;
1454 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1455 /* non-zero status meaning recipe doesn't exist */
1459 /* Get recipe to profile map so that we can get the fv from lkups that
1460 * we read for a recipe from FW. Since we want to minimize the number of
1461 * times we make this FW call, just make one call and cache the copy
1462 * until a new recipe is added. This operation is only required the
1463 * first time to get the changes from FW. Then to search existing
1464 * entries we don't need to update the cache again until another recipe
1467 if (*refresh_required) {
1468 ice_get_recp_to_prof_map(hw);
1469 *refresh_required = false;
1472 /* Start populating all the entries for recps[rid] based on lkups from
1473 * firmware. Note that we are only creating the root recipe in our
1476 lkup_exts = &recps[rid].lkup_exts;
1478 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1479 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1480 struct ice_recp_grp_entry *rg_entry;
1481 u8 i, prof, idx, prot = 0;
/* One group entry per sub-recipe, linked onto recps[rid].rg_list */
1485 rg_entry = (struct ice_recp_grp_entry *)
1486 ice_malloc(hw, sizeof(*rg_entry));
1488 status = ICE_ERR_NO_MEMORY;
1492 idx = root_bufs.recipe_indx;
1493 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1495 /* Mark all result indices in this chain */
1496 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1497 ice_set_bit(root_bufs.content.result_indx &
1498 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
1500 /* get the first profile that is associated with rid */
1501 prof = ice_find_first_bit(recipe_to_profile[idx],
1502 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is reserved; FW lookup words start at index 1 */
1503 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1504 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1506 rg_entry->fv_idx[i] = lkup_indx;
1507 rg_entry->fv_mask[i] =
1508 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1510 /* If the recipe is a chained recipe then all its
1511 * child recipe's result will have a result index.
1512 * To fill fv_words we should not use those result
1513 * index, we only need the protocol ids and offsets.
1514 * We will skip all the fv_idx which stores result
1515 * index in them. We also need to skip any fv_idx which
1516 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1517 * valid offset value.
1519 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1520 rg_entry->fv_idx[i]) ||
1521 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1522 rg_entry->fv_idx[i] == 0)
/* Translate field-vector index into protocol ID + offset */
1525 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1526 rg_entry->fv_idx[i], &prot, &off);
1527 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1528 lkup_exts->fv_words[fv_word_idx].off = off;
1529 lkup_exts->field_mask[fv_word_idx] =
1530 rg_entry->fv_mask[i];
1533 /* populate rg_list with the data from the child entry of this
1536 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1538 /* Propagate some data to the recipe database */
1539 recps[idx].is_root = !!is_root;
1540 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1541 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1542 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1543 recps[idx].chain_idx = root_bufs.content.result_indx &
1544 ~ICE_AQ_RECIPE_RESULT_EN;
1545 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
/* No chained result index for this recipe */
1547 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1553 /* Only do the following for root recipes entries */
1554 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1555 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1556 recps[idx].root_rid = root_bufs.content.rid &
1557 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1558 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1561 /* Complete initialization of the root recipe entry */
1562 lkup_exts->n_val_words = fv_word_idx;
1563 recps[rid].big_recp = (num_recps > 1);
1564 recps[rid].n_grp_count = (u8)num_recps;
1565 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
/* Keep a private copy of the raw FW buffer for later re-programming */
1566 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1567 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1568 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1569 if (!recps[rid].root_buf)
1572 /* Copy result indexes */
1573 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1574 recps[rid].recp_created = true;
1582 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1583 * @hw: pointer to hardware structure
1585 * This function is used to populate recipe_to_profile matrix where index to
1586 * this array is the recipe ID and the element is the mapping of which profiles
1587 * is this recipe mapped to.
 *
 * Also rebuilds the inverse profile_to_recipe mapping. Profiles whose
 * FW query fails are skipped (best effort refresh).
1589 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1591 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile that is currently in use */
1594 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1597 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1598 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1599 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1601 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1602 ICE_MAX_NUM_RECIPES);
/* Mirror each recipe bit into the recipe-indexed matrix */
1603 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1604 ice_set_bit(i, recipe_to_profile[j]);
1609 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1610 * @hw: pointer to the HW struct
1611 * @recp_list: pointer to sw recipe list
1613 * Allocate memory for the entire recipe table and initialize the structures/
1614 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated.
1617 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1619 struct ice_sw_recipe *recps;
1622 recps = (struct ice_sw_recipe *)
1623 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1625 return ICE_ERR_NO_MEMORY;
/* Each entry starts life as its own root recipe with empty rule lists */
1627 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1628 recps[i].root_rid = i;
1629 INIT_LIST_HEAD(&recps[i].filt_rules);
1630 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1631 INIT_LIST_HEAD(&recps[i].rg_list);
/* Per-recipe lock protecting filt_rules */
1632 ice_init_lock(&recps[i].filt_rule_lock);
1641 * ice_aq_get_sw_cfg - get switch configuration
1642 * @hw: pointer to the hardware structure
1643 * @buf: pointer to the result buffer
1644 * @buf_size: length of the buffer available for response
1645 * @req_desc: pointer to requested descriptor
1646 * @num_elems: pointer to number of elements
1647 * @cd: pointer to command details structure or NULL
1649 * Get switch configuration (0x0200) to be placed in buf.
1650 * This admin command returns information such as initial VSI/port number
1651 * and switch ID it belongs to.
1653 * NOTE: *req_desc is both an input/output parameter.
1654 * The caller of this function first calls this function with *request_desc set
1655 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1656 * configuration information has been returned; if non-zero (meaning not all
1657 * the information was returned), the caller should call this function again
1658 * with *req_desc set to the previous value returned by f/w to get the
1659 * next block of switch configuration information.
1661 * *num_elems is output only parameter. This reflects the number of elements
1662 * in response buffer. The caller of this function to use *num_elems while
1663 * parsing the response buffer.
1665 static enum ice_status
1666 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1667 u16 buf_size, u16 *req_desc, u16 *num_elems,
1668 struct ice_sq_cd *cd)
1670 struct ice_aqc_get_sw_cfg *cmd;
1671 struct ice_aq_desc desc;
1672 enum ice_status status;
1674 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1675 cmd = &desc.params.get_sw_conf;
/* Continuation token: 0 for first call, previous FW value afterwards */
1676 cmd->element = CPU_TO_LE16(*req_desc);
1678 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* On success FW writes back the next token and the element count */
1680 *req_desc = LE16_TO_CPU(cmd->element);
1681 *num_elems = LE16_TO_CPU(cmd->num_elems);
1688 * ice_alloc_sw - allocate resources specific to switch
1689 * @hw: pointer to the HW struct
1690 * @ena_stats: true to turn on VEB stats
1691 * @shared_res: true for shared resource, false for dedicated resource
1692 * @sw_id: switch ID returned
1693 * @counter_id: VEB counter ID returned
1695 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * Returns ICE_ERR_NO_MEMORY on allocation failure, otherwise the status
 * of the alloc-resources admin command.
1698 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1701 struct ice_aqc_alloc_free_res_elem *sw_buf;
1702 struct ice_aqc_res_elem *sw_ele;
1703 enum ice_status status;
1706 buf_len = ice_struct_size(sw_buf, elem, 1);
1707 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1709 return ICE_ERR_NO_MEMORY;
1711 /* Prepare buffer for switch ID.
1712 * The number of resource entries in buffer is passed as 1 since only a
1713 * single switch/VEB instance is allocated, and hence a single sw_id
1716 sw_buf->num_elems = CPU_TO_LE16(1);
1718 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1719 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1720 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1722 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1723 ice_aqc_opc_alloc_res, NULL);
1726 goto ice_alloc_sw_exit;
/* FW returns the allocated SWID in the first (only) element */
1728 sw_ele = &sw_buf->elem[0];
1729 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1732 /* Prepare buffer for VEB Counter */
1733 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1734 struct ice_aqc_alloc_free_res_elem *counter_buf;
1735 struct ice_aqc_res_elem *counter_ele;
1737 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1738 ice_malloc(hw, buf_len);
1740 status = ICE_ERR_NO_MEMORY;
1741 goto ice_alloc_sw_exit;
1744 /* The number of resource entries in buffer is passed as 1 since
1745 * only a single switch/VEB instance is allocated, and hence a
1746 * single VEB counter is requested.
1748 counter_buf->num_elems = CPU_TO_LE16(1);
1749 counter_buf->res_type =
1750 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1751 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1752 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* On failure free the counter buffer and bail via common exit */
1756 ice_free(hw, counter_buf);
1757 goto ice_alloc_sw_exit;
1759 counter_ele = &counter_buf->elem[0];
1760 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1761 ice_free(hw, counter_buf);
1765 ice_free(hw, sw_buf);
1770 * ice_free_sw - free resources specific to switch
1771 * @hw: pointer to the HW struct
1772 * @sw_id: switch ID returned
1773 * @counter_id: VEB counter ID returned
1775 * free switch resources (SWID and VEB counter) (0x0209)
1777 * NOTE: This function frees multiple resources. It continues
1778 * releasing other resources even after it encounters error.
1779 * The error code returned is the last error it encountered.
1781 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1783 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1784 enum ice_status status, ret_status;
1787 buf_len = ice_struct_size(sw_buf, elem, 1);
1788 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1790 return ICE_ERR_NO_MEMORY;
1792 /* Prepare buffer to free for switch ID res.
1793 * The number of resource entries in buffer is passed as 1 since only a
1794 * single switch/VEB instance is freed, and hence a single sw_id
1797 sw_buf->num_elems = CPU_TO_LE16(1);
1798 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1799 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Keep going even if the SWID free fails; remember the status */
1801 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802 ice_aqc_opc_free_res, NULL);
1805 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1807 /* Prepare buffer to free for VEB Counter resource */
1808 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1809 ice_malloc(hw, buf_len);
1811 ice_free(hw, sw_buf);
1812 return ICE_ERR_NO_MEMORY;
1815 /* The number of resource entries in buffer is passed as 1 since only a
1816 * single switch/VEB instance is freed, and hence a single VEB counter
1819 counter_buf->num_elems = CPU_TO_LE16(1);
1820 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1821 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1823 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1824 ice_aqc_opc_free_res, NULL);
/* Counter free failure overrides any earlier status (last error wins) */
1826 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
1827 ret_status = status;
1830 ice_free(hw, counter_buf);
1831 ice_free(hw, sw_buf);
 * ice_aq_add_vsi - add a VSI context to hardware
1837 * @hw: pointer to the HW struct
1838 * @vsi_ctx: pointer to a VSI context struct
1839 * @cd: pointer to command details structure or NULL
1841 * Add a VSI context to the hardware (0x0210)
 *
 * On success, updates @vsi_ctx with the FW-assigned VSI number and the
 * used/free VSI counts from the response.
1844 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1845 struct ice_sq_cd *cd)
1847 struct ice_aqc_add_update_free_vsi_resp *res;
1848 struct ice_aqc_add_get_update_free_vsi *cmd;
1849 struct ice_aq_desc desc;
1850 enum ice_status status;
1852 cmd = &desc.params.vsi_cmd;
1853 res = &desc.params.add_update_free_vsi_res;
1855 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If not allocating from the pool, request a specific VSI number */
1857 if (!vsi_ctx->alloc_from_pool)
1858 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1859 ICE_AQ_VSI_IS_VALID);
1861 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: command carries a read buffer (the VSI properties) */
1863 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1865 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1866 sizeof(vsi_ctx->info), cd);
1869 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1870 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1871 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
 * ice_aq_free_vsi - free a VSI context in hardware
1879 * @hw: pointer to the HW struct
1880 * @vsi_ctx: pointer to a VSI context struct
1881 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1882 * @cd: pointer to command details structure or NULL
1884 * Free VSI context info from hardware (0x0213)
 *
 * On success, updates @vsi_ctx with the used/free VSI counts from FW.
1887 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1888 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1890 struct ice_aqc_add_update_free_vsi_resp *resp;
1891 struct ice_aqc_add_get_update_free_vsi *cmd;
1892 struct ice_aq_desc desc;
1893 enum ice_status status;
1895 cmd = &desc.params.vsi_cmd;
1896 resp = &desc.params.add_update_free_vsi_res;
1898 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1900 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Optionally keep the allocation owned by this PF after the free */
1902 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1904 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1906 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1907 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 * ice_aq_update_vsi - update a VSI context in hardware
1915 * @hw: pointer to the HW struct
1916 * @vsi_ctx: pointer to a VSI context struct
1917 * @cd: pointer to command details structure or NULL
1919 * Update VSI context in the hardware (0x0211)
 *
 * On success, updates @vsi_ctx with the used/free VSI counts from FW.
1922 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1923 struct ice_sq_cd *cd)
1925 struct ice_aqc_add_update_free_vsi_resp *resp;
1926 struct ice_aqc_add_get_update_free_vsi *cmd;
1927 struct ice_aq_desc desc;
1928 enum ice_status status;
1930 cmd = &desc.params.vsi_cmd;
1931 resp = &desc.params.add_update_free_vsi_res;
1933 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1935 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: command carries a read buffer (the VSI properties) */
1937 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1939 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1940 sizeof(vsi_ctx->info), cd);
1943 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1944 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1951 * ice_is_vsi_valid - check whether the VSI is valid or not
1952 * @hw: pointer to the HW struct
1953 * @vsi_handle: VSI handle
1955 * check whether the VSI is valid or not
 *
 * Returns true only when the handle is in range and a context is saved.
1957 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1959 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1963 * ice_get_hw_vsi_num - return the HW VSI number
1964 * @hw: pointer to the HW struct
1965 * @vsi_handle: VSI handle
1967 * return the HW VSI number
1968 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 *
 * No bounds check here: an invalid handle dereferences a NULL context.
1970 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1972 return hw->vsi_ctx[vsi_handle]->vsi_num;
1976 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1977 * @hw: pointer to the HW struct
1978 * @vsi_handle: VSI handle
1980 * return the VSI context entry for a given VSI handle
 *
 * Returns NULL for an out-of-range handle (safe lookup variant).
1982 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1984 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1988 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1989 * @hw: pointer to the HW struct
1990 * @vsi_handle: VSI handle
1991 * @vsi: VSI context pointer
1993 * save the VSI context entry for a given VSI handle
 *
 * Caller is responsible for a valid handle; any existing entry is replaced.
1996 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1998 hw->vsi_ctx[vsi_handle] = vsi;
2002 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2003 * @hw: pointer to the HW struct
2004 * @vsi_handle: VSI handle
 *
 * Frees the per-TC LAN queue context arrays of the VSI, if allocated.
2006 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2008 struct ice_vsi_ctx *vsi;
2011 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2014 ice_for_each_traffic_class(i) {
2015 if (vsi->lan_q_ctx[i]) {
2016 ice_free(hw, vsi->lan_q_ctx[i]);
/* NULL the pointer so a later clear/free is a no-op */
2017 vsi->lan_q_ctx[i] = NULL;
2023 * ice_clear_vsi_ctx - clear the VSI context entry
2024 * @hw: pointer to the HW struct
2025 * @vsi_handle: VSI handle
2027 * clear the VSI context entry
 *
 * Releases queue contexts first, then drops the saved context entry.
2029 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2031 struct ice_vsi_ctx *vsi;
2033 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2035 ice_clear_vsi_q_ctx(hw, vsi_handle);
2037 hw->vsi_ctx[vsi_handle] = NULL;
2042 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2043 * @hw: pointer to the HW struct
 *
 * Iterates every possible VSI handle; missing entries are skipped safely.
2045 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2049 for (i = 0; i < ICE_MAX_VSI; i++)
2050 ice_clear_vsi_ctx(hw, i);
2054 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2055 * @hw: pointer to the HW struct
2056 * @vsi_handle: unique VSI handle provided by drivers
2057 * @vsi_ctx: pointer to a VSI context struct
2058 * @cd: pointer to command details structure or NULL
2060 * Add a VSI context to the hardware also add it into the VSI handle list.
2061 * If this function gets called after reset for existing VSIs then update
2062 * with the new HW VSI number in the corresponding VSI handle list entry.
 *
 * Returns ICE_ERR_PARAM for an out-of-range handle, ICE_ERR_NO_MEMORY if
 * the bookkeeping copy cannot be allocated (the HW VSI is freed again).
2065 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2066 struct ice_sq_cd *cd)
2068 struct ice_vsi_ctx *tmp_vsi_ctx;
2069 enum ice_status status;
2071 if (vsi_handle >= ICE_MAX_VSI)
2072 return ICE_ERR_PARAM;
2073 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2076 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2078 /* Create a new VSI context */
2079 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2080 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Roll back the HW add so FW and SW bookkeeping stay in sync */
2082 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2083 return ICE_ERR_NO_MEMORY;
2085 *tmp_vsi_ctx = *vsi_ctx;
2087 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2089 /* update with new HW VSI num */
2090 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2097 * ice_free_vsi- free VSI context from hardware and VSI handle list
2098 * @hw: pointer to the HW struct
2099 * @vsi_handle: unique VSI handle
2100 * @vsi_ctx: pointer to a VSI context struct
2101 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2102 * @cd: pointer to command details structure or NULL
2104 * Free VSI context info from hardware as well as from VSI handle list
 *
 * Returns ICE_ERR_PARAM when the handle has no saved context.
2107 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2108 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2110 enum ice_status status;
2112 if (!ice_is_vsi_valid(hw, vsi_handle))
2113 return ICE_ERR_PARAM;
/* Translate the SW handle into the HW VSI number before the AQ call */
2114 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2115 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2117 ice_clear_vsi_ctx(hw, vsi_handle);
 * ice_update_vsi - update a VSI context via its handle
2123 * @hw: pointer to the HW struct
2124 * @vsi_handle: unique VSI handle
2125 * @vsi_ctx: pointer to a VSI context struct
2126 * @cd: pointer to command details structure or NULL
2128 * Update VSI context in the hardware
 *
 * Returns ICE_ERR_PARAM when the handle has no saved context.
2131 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2132 struct ice_sq_cd *cd)
2134 if (!ice_is_vsi_valid(hw, vsi_handle))
2135 return ICE_ERR_PARAM;
/* Translate the SW handle into the HW VSI number before the AQ call */
2136 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2137 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2141 * ice_aq_get_vsi_params
2142 * @hw: pointer to the HW struct
2143 * @vsi_ctx: pointer to a VSI context struct
2144 * @cd: pointer to command details structure or NULL
2146 * Get VSI context info from hardware (0x0212)
 *
 * On success, fills @vsi_ctx->info with the VSI properties and updates
 * the VSI number and used/free counts from the response.
2149 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2150 struct ice_sq_cd *cd)
2152 struct ice_aqc_add_get_update_free_vsi *cmd;
2153 struct ice_aqc_get_vsi_resp *resp;
2154 struct ice_aq_desc desc;
2155 enum ice_status status;
2157 cmd = &desc.params.vsi_cmd;
2158 resp = &desc.params.get_vsi_resp;
2160 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2162 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2164 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2165 sizeof(vsi_ctx->info), cd);
2167 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2169 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2170 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2177 * ice_aq_add_update_mir_rule - add/update a mirror rule
2178 * @hw: pointer to the HW struct
2179 * @rule_type: Rule Type
2180 * @dest_vsi: VSI number to which packets will be mirrored
2181 * @count: length of the list
2182 * @mr_buf: buffer for list of mirrored VSI numbers
2183 * @cd: pointer to command details structure or NULL
 * @rule_id: in/out rule ID; pass ICE_INVAL_MIRROR_RULE_ID to add a new
 *           rule, or an existing ID to update; on success holds the FW ID
2186 * Add/Update Mirror Rule (0x260).
2189 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2190 u16 count, struct ice_mir_rule_buf *mr_buf,
2191 struct ice_sq_cd *cd, u16 *rule_id)
2193 struct ice_aqc_add_update_mir_rule *cmd;
2194 struct ice_aq_desc desc;
2195 enum ice_status status;
2196 __le16 *mr_list = NULL;
/* Validate the rule type and, for vport rules, allocate the VSI list */
2199 switch (rule_type) {
2200 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2201 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2202 /* Make sure count and mr_buf are set for these rule_types */
2203 if (!(count && mr_buf))
2204 return ICE_ERR_PARAM;
2206 buf_size = count * sizeof(__le16);
2207 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2209 return ICE_ERR_NO_MEMORY;
2211 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2212 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2213 /* Make sure count and mr_buf are not set for these
2216 if (count || mr_buf)
2217 return ICE_ERR_PARAM;
2220 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2221 return ICE_ERR_OUT_OF_RANGE;
2224 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2226 /* Pre-process 'mr_buf' items for add/update of virtual port
2227 * ingress/egress mirroring (but not physical port ingress/egress
2233 for (i = 0; i < count; i++) {
2236 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2238 /* Validate specified VSI number, make sure it is less
2239 * than ICE_MAX_VSI, if not return with error.
2241 if (id >= ICE_MAX_VSI) {
2242 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
/* Free the list before bailing to avoid a leak */
2244 ice_free(hw, mr_list);
2245 return ICE_ERR_OUT_OF_RANGE;
2248 /* add VSI to mirror rule */
2251 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2252 else /* remove VSI from mirror rule */
2253 mr_list[i] = CPU_TO_LE16(id);
2257 cmd = &desc.params.add_update_rule;
/* A valid incoming rule ID means "update existing rule" */
2258 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2259 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2260 ICE_AQC_RULE_ID_VALID_M);
2261 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2262 cmd->num_entries = CPU_TO_LE16(count);
2263 cmd->dest = CPU_TO_LE16(dest_vsi);
2265 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2267 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2269 ice_free(hw, mr_list);
2275 * ice_aq_delete_mir_rule - delete a mirror rule
2276 * @hw: pointer to the HW struct
2277 * @rule_id: Mirror rule ID (to be deleted)
2278 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2279 * otherwise it is returned to the shared pool
2280 * @cd: pointer to command details structure or NULL
2282 * Delete Mirror Rule (0x261).
 *
 * Returns ICE_ERR_OUT_OF_RANGE for a rule_id outside the supported range.
2285 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2286 struct ice_sq_cd *cd)
2288 struct ice_aqc_delete_mir_rule *cmd;
2289 struct ice_aq_desc desc;
2291 /* rule_id should be in the range 0...63 */
2292 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2293 return ICE_ERR_OUT_OF_RANGE;
2295 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2297 cmd = &desc.params.del_rule;
/* FW requires the VALID bit set alongside the rule ID */
2298 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2299 cmd->rule_id = CPU_TO_LE16(rule_id);
2302 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2304 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2308 * ice_aq_alloc_free_vsi_list
2309 * @hw: pointer to the HW struct
2310 * @vsi_list_id: VSI list ID returned or used for lookup
2311 * @lkup_type: switch rule filter lookup type
2312 * @opc: switch rules population command type - pass in the command opcode
2314 * allocates or free a VSI list resource
 *
 * For an alloc, *@vsi_list_id receives the new list ID; for a free, it
 * selects the list to release. Returns ICE_ERR_PARAM for an unsupported
 * lookup type.
2316 static enum ice_status
2317 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2318 enum ice_sw_lkup_type lkup_type,
2319 enum ice_adminq_opc opc)
2321 struct ice_aqc_alloc_free_res_elem *sw_buf;
2322 struct ice_aqc_res_elem *vsi_ele;
2323 enum ice_status status;
2326 buf_len = ice_struct_size(sw_buf, elem, 1);
2327 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2329 return ICE_ERR_NO_MEMORY;
2330 sw_buf->num_elems = CPU_TO_LE16(1);
/* Replication lists serve most lookup types; VLAN uses a prune list */
2332 if (lkup_type == ICE_SW_LKUP_MAC ||
2333 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2334 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2335 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2336 lkup_type == ICE_SW_LKUP_PROMISC ||
2337 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2338 lkup_type == ICE_SW_LKUP_LAST) {
2339 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2340 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2342 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2344 status = ICE_ERR_PARAM;
2345 goto ice_aq_alloc_free_vsi_list_exit;
/* A free must name the list being released */
2348 if (opc == ice_aqc_opc_free_res)
2349 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2351 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2353 goto ice_aq_alloc_free_vsi_list_exit;
2355 if (opc == ice_aqc_opc_alloc_res) {
2356 vsi_ele = &sw_buf->elem[0];
2357 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2360 ice_aq_alloc_free_vsi_list_exit:
2361 ice_free(hw, sw_buf);
2366 * ice_aq_set_storm_ctrl - Sets storm control configuration
2367 * @hw: pointer to the HW struct
2368 * @bcast_thresh: represents the upper threshold for broadcast storm control
2369 * @mcast_thresh: represents the upper threshold for multicast storm control
2370 * @ctl_bitmask: storm control knobs
2372 * Sets the storm control configuration (0x0280)
2375 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2378 struct ice_aqc_storm_cfg *cmd;
2379 struct ice_aq_desc desc;
2381 cmd = &desc.params.storm_conf;
2383 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the HW-supported bit width */
2385 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2386 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2387 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2389 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2393 * ice_aq_get_storm_ctrl - gets storm control configuration
2394 * @hw: pointer to the HW struct
2395 * @bcast_thresh: represents the upper threshold for broadcast storm control
2396 * @mcast_thresh: represents the upper threshold for multicast storm control
2397 * @ctl_bitmask: storm control knobs
2399 * Gets the storm control configuration (0x0281)
 *
 * Output pointers appear to be optional (filled only when provided) —
 * NOTE(review): confirm the NULL checks on the elided lines.
2402 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2405 enum ice_status status;
2406 struct ice_aq_desc desc;
2408 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2410 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2412 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* Strip reserved bits before reporting thresholds back */
2415 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2418 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2421 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2428 * ice_aq_sw_rules - add/update/remove switch rules
2429 * @hw: pointer to the HW struct
2430 * @rule_list: pointer to switch rule population list
2431 * @rule_list_sz: total size of the rule list in bytes
2432 * @num_rules: number of switch rules in the rule_list
2433 * @opc: switch rules population command type - pass in the command opcode
2434 * @cd: pointer to command details structure or NULL
2436 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 *
 * Returns ICE_ERR_PARAM for an unsupported opcode and maps a FW ENOENT on
 * update/remove to ICE_ERR_DOES_NOT_EXIST.
2438 static enum ice_status
2439 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2440 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2442 struct ice_aq_desc desc;
2443 enum ice_status status;
2445 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are legal here */
2447 if (opc != ice_aqc_opc_add_sw_rules &&
2448 opc != ice_aqc_opc_update_sw_rules &&
2449 opc != ice_aqc_opc_remove_sw_rules)
2450 return ICE_ERR_PARAM;
2452 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2454 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2455 desc.params.sw_rules.num_rules_fltr_entry_index =
2456 CPU_TO_LE16(num_rules);
2457 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Updating/removing a rule FW does not know about is a distinct error */
2458 if (opc != ice_aqc_opc_add_sw_rules &&
2459 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2460 status = ICE_ERR_DOES_NOT_EXIST;
2466 * ice_aq_add_recipe - add switch recipe
2467 * @hw: pointer to the HW struct
2468 * @s_recipe_list: pointer to switch rule population list
2469 * @num_recipes: number of switch recipes in the list
2470 * @cd: pointer to command details structure or NULL
2475 ice_aq_add_recipe(struct ice_hw *hw,
2476 struct ice_aqc_recipe_data_elem *s_recipe_list,
2477 u16 num_recipes, struct ice_sq_cd *cd)
2479 struct ice_aqc_add_get_recipe *cmd;
2480 struct ice_aq_desc desc;
2483 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2484 cmd = &desc.params.add_get_recipe;
2485 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2487 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2488 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2490 buf_size = num_recipes * sizeof(*s_recipe_list);
2492 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2496 * ice_aq_get_recipe - get switch recipe
2497 * @hw: pointer to the HW struct
2498 * @s_recipe_list: pointer to switch rule population list
2499 * @num_recipes: pointer to the number of recipes (input and output)
2500 * @recipe_root: root recipe number of recipe(s) to retrieve
2501 * @cd: pointer to command details structure or NULL
2505 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2506 * On output, *num_recipes will equal the number of entries returned in
2509 * The caller must supply enough space in s_recipe_list to hold all possible
2510 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2513 ice_aq_get_recipe(struct ice_hw *hw,
2514 struct ice_aqc_recipe_data_elem *s_recipe_list,
2515 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2517 struct ice_aqc_add_get_recipe *cmd;
2518 struct ice_aq_desc desc;
2519 enum ice_status status;
2522 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2523 return ICE_ERR_PARAM;
2525 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2526 cmd = &desc.params.add_get_recipe;
2527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2529 cmd->return_index = CPU_TO_LE16(recipe_root);
2530 cmd->num_sub_recipes = 0;
2532 buf_size = *num_recipes * sizeof(*s_recipe_list);
2534 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2535 /* cppcheck-suppress constArgument */
2536 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2542 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2543 * @hw: pointer to the HW struct
2544 * @profile_id: package profile ID to associate the recipe with
2545 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2546 * @cd: pointer to command details structure or NULL
2547 * Recipe to profile association (0x0291)
2550 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2551 struct ice_sq_cd *cd)
2553 struct ice_aqc_recipe_to_profile *cmd;
2554 struct ice_aq_desc desc;
2556 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2557 cmd = &desc.params.recipe_to_profile;
2558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2559 cmd->profile_id = CPU_TO_LE16(profile_id);
2560 /* Set the recipe ID bit in the bitmask to let the device know which
2561 * profile we are associating the recipe to
2563 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2564 ICE_NONDMA_TO_NONDMA);
2566 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2570 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2571 * @hw: pointer to the HW struct
2572 * @profile_id: package profile ID to associate the recipe with
2573 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2574 * @cd: pointer to command details structure or NULL
2575 * Associate profile ID with given recipe (0x0293)
2578 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2579 struct ice_sq_cd *cd)
2581 struct ice_aqc_recipe_to_profile *cmd;
2582 struct ice_aq_desc desc;
2583 enum ice_status status;
2585 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2586 cmd = &desc.params.recipe_to_profile;
2587 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2588 cmd->profile_id = CPU_TO_LE16(profile_id);
2590 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2592 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2593 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2599 * ice_alloc_recipe - add recipe resource
2600 * @hw: pointer to the hardware structure
2601 * @rid: recipe ID returned as response to AQ call
2603 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2605 struct ice_aqc_alloc_free_res_elem *sw_buf;
2606 enum ice_status status;
2609 buf_len = ice_struct_size(sw_buf, elem, 1);
2610 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2612 return ICE_ERR_NO_MEMORY;
2614 sw_buf->num_elems = CPU_TO_LE16(1);
2615 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2616 ICE_AQC_RES_TYPE_S) |
2617 ICE_AQC_RES_TYPE_FLAG_SHARED);
2618 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2619 ice_aqc_opc_alloc_res, NULL);
2621 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2622 ice_free(hw, sw_buf);
2627 /* ice_init_port_info - Initialize port_info with switch configuration data
2628 * @pi: pointer to port_info
2629 * @vsi_port_num: VSI number or port number
2630 * @type: Type of switch element (port or VSI)
2631 * @swid: switch ID of the switch the element is attached to
2632 * @pf_vf_num: PF or VF number
2633 * @is_vf: true if the element is a VF, false otherwise
2636 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2637 u16 swid, u16 pf_vf_num, bool is_vf)
2640 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2641 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2643 pi->pf_vf_num = pf_vf_num;
2645 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2646 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2649 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2654 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2655 * @hw: pointer to the hardware structure
2657 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2659 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2660 enum ice_status status;
2667 num_total_ports = 1;
2669 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2670 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2673 return ICE_ERR_NO_MEMORY;
2675 /* Multiple calls to ice_aq_get_sw_cfg may be required
2676 * to get all the switch configuration information. The need
2677 * for additional calls is indicated by ice_aq_get_sw_cfg
2678 * writing a non-zero value in req_desc
2681 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2683 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2684 &req_desc, &num_elems, NULL);
2689 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2690 u16 pf_vf_num, swid, vsi_port_num;
2694 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2695 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2697 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2698 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2700 swid = LE16_TO_CPU(ele->swid);
2702 if (LE16_TO_CPU(ele->pf_vf_num) &
2703 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2706 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2707 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2710 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2711 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2712 if (j == num_total_ports) {
2713 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2714 status = ICE_ERR_CFG;
2717 ice_init_port_info(hw->port_info,
2718 vsi_port_num, res_type, swid,
2726 } while (req_desc && !status);
2729 ice_free(hw, (void *)rbuf);
2734 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2735 * @hw: pointer to the hardware structure
2736 * @fi: filter info structure to fill/update
2738 * This helper function populates the lb_en and lan_en elements of the provided
2739 * ice_fltr_info struct using the switch's type and characteristics of the
2740 * switch rule being configured.
2742 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2744 if ((fi->flag & ICE_FLTR_RX) &&
2745 (fi->fltr_act == ICE_FWD_TO_VSI ||
2746 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2747 fi->lkup_type == ICE_SW_LKUP_LAST)
2751 if ((fi->flag & ICE_FLTR_TX) &&
2752 (fi->fltr_act == ICE_FWD_TO_VSI ||
2753 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2754 fi->fltr_act == ICE_FWD_TO_Q ||
2755 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2756 /* Setting LB for prune actions will result in replicated
2757 * packets to the internal switch that will be dropped.
2759 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2762 /* Set lan_en to TRUE if
2763 * 1. The switch is a VEB AND
2765 * 2.1 The lookup is a directional lookup like ethertype,
2766 * promiscuous, ethertype-MAC, promiscuous-VLAN
2767 * and default-port OR
2768 * 2.2 The lookup is VLAN, OR
2769 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2770 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2774 * The switch is a VEPA.
2776 * In all other cases, the LAN enable has to be set to false.
2779 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2780 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2781 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2782 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2783 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2784 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2785 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2786 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2787 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2788 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2797 * ice_fill_sw_rule - Helper function to fill switch rule structure
2798 * @hw: pointer to the hardware structure
2799 * @f_info: entry containing packet forwarding information
2800 * @s_rule: switch rule structure to be filled in based on mac_entry
2801 * @opc: switch rules population command type - pass in the command opcode
2804 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2805 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2807 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2815 if (opc == ice_aqc_opc_remove_sw_rules) {
2816 s_rule->pdata.lkup_tx_rx.act = 0;
2817 s_rule->pdata.lkup_tx_rx.index =
2818 CPU_TO_LE16(f_info->fltr_rule_id);
2819 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2823 eth_hdr_sz = sizeof(dummy_eth_header);
2824 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2826 /* initialize the ether header with a dummy header */
2827 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2828 ice_fill_sw_info(hw, f_info);
2830 switch (f_info->fltr_act) {
2831 case ICE_FWD_TO_VSI:
2832 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2833 ICE_SINGLE_ACT_VSI_ID_M;
2834 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2835 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2836 ICE_SINGLE_ACT_VALID_BIT;
2838 case ICE_FWD_TO_VSI_LIST:
2839 act |= ICE_SINGLE_ACT_VSI_LIST;
2840 act |= (f_info->fwd_id.vsi_list_id <<
2841 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2842 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2843 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2844 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2845 ICE_SINGLE_ACT_VALID_BIT;
2848 act |= ICE_SINGLE_ACT_TO_Q;
2849 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2850 ICE_SINGLE_ACT_Q_INDEX_M;
2852 case ICE_DROP_PACKET:
2853 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2854 ICE_SINGLE_ACT_VALID_BIT;
2856 case ICE_FWD_TO_QGRP:
2857 q_rgn = f_info->qgrp_size > 0 ?
2858 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2859 act |= ICE_SINGLE_ACT_TO_Q;
2860 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2861 ICE_SINGLE_ACT_Q_INDEX_M;
2862 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2863 ICE_SINGLE_ACT_Q_REGION_M;
2870 act |= ICE_SINGLE_ACT_LB_ENABLE;
2872 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2874 switch (f_info->lkup_type) {
2875 case ICE_SW_LKUP_MAC:
2876 daddr = f_info->l_data.mac.mac_addr;
2878 case ICE_SW_LKUP_VLAN:
2879 vlan_id = f_info->l_data.vlan.vlan_id;
2880 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2881 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2882 act |= ICE_SINGLE_ACT_PRUNE;
2883 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2886 case ICE_SW_LKUP_ETHERTYPE_MAC:
2887 daddr = f_info->l_data.ethertype_mac.mac_addr;
2889 case ICE_SW_LKUP_ETHERTYPE:
2890 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2891 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2893 case ICE_SW_LKUP_MAC_VLAN:
2894 daddr = f_info->l_data.mac_vlan.mac_addr;
2895 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2897 case ICE_SW_LKUP_PROMISC_VLAN:
2898 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2900 case ICE_SW_LKUP_PROMISC:
2901 daddr = f_info->l_data.mac_vlan.mac_addr;
2907 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2908 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2909 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2911 /* Recipe set depending on lookup type */
2912 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2913 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2914 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2917 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2918 ICE_NONDMA_TO_NONDMA);
2920 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2921 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2922 *off = CPU_TO_BE16(vlan_id);
2925 /* Create the switch rule with the final dummy Ethernet header */
2926 if (opc != ice_aqc_opc_update_sw_rules)
2927 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2931 * ice_add_marker_act
2932 * @hw: pointer to the hardware structure
2933 * @m_ent: the management entry for which sw marker needs to be added
2934 * @sw_marker: sw marker to tag the Rx descriptor with
2935 * @l_id: large action resource ID
2937 * Create a large action to hold software marker and update the switch rule
2938 * entry pointed by m_ent with newly created large action
2940 static enum ice_status
2941 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2942 u16 sw_marker, u16 l_id)
2944 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2945 /* For software marker we need 3 large actions
2946 * 1. FWD action: FWD TO VSI or VSI LIST
2947 * 2. GENERIC VALUE action to hold the profile ID
2948 * 3. GENERIC VALUE action to hold the software marker ID
2950 const u16 num_lg_acts = 3;
2951 enum ice_status status;
2957 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2958 return ICE_ERR_PARAM;
2960 /* Create two back-to-back switch rules and submit them to the HW using
2961 * one memory buffer:
2965 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2966 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2967 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2969 return ICE_ERR_NO_MEMORY;
2971 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2973 /* Fill in the first switch rule i.e. large action */
2974 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2975 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2976 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2978 /* First action VSI forwarding or VSI list forwarding depending on how
2981 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2982 m_ent->fltr_info.fwd_id.hw_vsi_id;
2984 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2985 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2986 ICE_LG_ACT_VSI_LIST_ID_M;
2987 if (m_ent->vsi_count > 1)
2988 act |= ICE_LG_ACT_VSI_LIST;
2989 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2991 /* Second action descriptor type */
2992 act = ICE_LG_ACT_GENERIC;
2994 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2995 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2997 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2998 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3000 /* Third action Marker value */
3001 act |= ICE_LG_ACT_GENERIC;
3002 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3003 ICE_LG_ACT_GENERIC_VALUE_M;
3005 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3007 /* call the fill switch rule to fill the lookup Tx Rx structure */
3008 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3009 ice_aqc_opc_update_sw_rules);
3011 /* Update the action to point to the large action ID */
3012 rx_tx->pdata.lkup_tx_rx.act =
3013 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3014 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3015 ICE_SINGLE_ACT_PTR_VAL_M));
3017 /* Use the filter rule ID of the previously created rule with single
3018 * act. Once the update happens, hardware will treat this as large
3021 rx_tx->pdata.lkup_tx_rx.index =
3022 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3024 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3025 ice_aqc_opc_update_sw_rules, NULL);
3027 m_ent->lg_act_idx = l_id;
3028 m_ent->sw_marker_id = sw_marker;
3031 ice_free(hw, lg_act);
3036 * ice_add_counter_act - add/update filter rule with counter action
3037 * @hw: pointer to the hardware structure
3038 * @m_ent: the management entry for which counter needs to be added
3039 * @counter_id: VLAN counter ID returned as part of allocate resource
3040 * @l_id: large action resource ID
3042 static enum ice_status
3043 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3044 u16 counter_id, u16 l_id)
3046 struct ice_aqc_sw_rules_elem *lg_act;
3047 struct ice_aqc_sw_rules_elem *rx_tx;
3048 enum ice_status status;
3049 /* 2 actions will be added while adding a large action counter */
3050 const int num_acts = 2;
3057 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3058 return ICE_ERR_PARAM;
3060 /* Create two back-to-back switch rules and submit them to the HW using
3061 * one memory buffer:
3065 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3066 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3067 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
3070 return ICE_ERR_NO_MEMORY;
3072 rx_tx = (struct ice_aqc_sw_rules_elem *)
3073 ((u8 *)lg_act + lg_act_size);
3075 /* Fill in the first switch rule i.e. large action */
3076 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3077 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3078 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3080 /* First action VSI forwarding or VSI list forwarding depending on how
3083 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3084 m_ent->fltr_info.fwd_id.hw_vsi_id;
3086 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3087 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3088 ICE_LG_ACT_VSI_LIST_ID_M;
3089 if (m_ent->vsi_count > 1)
3090 act |= ICE_LG_ACT_VSI_LIST;
3091 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3093 /* Second action counter ID */
3094 act = ICE_LG_ACT_STAT_COUNT;
3095 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3096 ICE_LG_ACT_STAT_COUNT_M;
3097 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3099 /* call the fill switch rule to fill the lookup Tx Rx structure */
3100 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3101 ice_aqc_opc_update_sw_rules);
3103 act = ICE_SINGLE_ACT_PTR;
3104 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3105 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3107 /* Use the filter rule ID of the previously created rule with single
3108 * act. Once the update happens, hardware will treat this as large
3111 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3112 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3114 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3115 ice_aqc_opc_update_sw_rules, NULL);
3117 m_ent->lg_act_idx = l_id;
3118 m_ent->counter_index = counter_id;
3121 ice_free(hw, lg_act);
3126 * ice_create_vsi_list_map
3127 * @hw: pointer to the hardware structure
3128 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3129 * @num_vsi: number of VSI handles in the array
3130 * @vsi_list_id: VSI list ID generated as part of allocate resource
3132 * Helper function to create a new entry of VSI list ID to VSI mapping
3133 * using the given VSI list ID
3135 static struct ice_vsi_list_map_info *
3136 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3139 struct ice_switch_info *sw = hw->switch_info;
3140 struct ice_vsi_list_map_info *v_map;
3143 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3148 v_map->vsi_list_id = vsi_list_id;
3150 for (i = 0; i < num_vsi; i++)
3151 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3153 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3158 * ice_update_vsi_list_rule
3159 * @hw: pointer to the hardware structure
3160 * @vsi_handle_arr: array of VSI handles to form a VSI list
3161 * @num_vsi: number of VSI handles in the array
3162 * @vsi_list_id: VSI list ID generated as part of allocate resource
3163 * @remove: Boolean value to indicate if this is a remove action
3164 * @opc: switch rules population command type - pass in the command opcode
3165 * @lkup_type: lookup type of the filter
3167 * Call AQ command to add a new switch rule or update existing switch rule
3168 * using the given VSI list ID
3170 static enum ice_status
3171 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3172 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3173 enum ice_sw_lkup_type lkup_type)
3175 struct ice_aqc_sw_rules_elem *s_rule;
3176 enum ice_status status;
3182 return ICE_ERR_PARAM;
3184 if (lkup_type == ICE_SW_LKUP_MAC ||
3185 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3186 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3187 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3188 lkup_type == ICE_SW_LKUP_PROMISC ||
3189 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3190 lkup_type == ICE_SW_LKUP_LAST)
3191 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3192 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3193 else if (lkup_type == ICE_SW_LKUP_VLAN)
3194 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3195 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3197 return ICE_ERR_PARAM;
3199 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3200 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3202 return ICE_ERR_NO_MEMORY;
3203 for (i = 0; i < num_vsi; i++) {
3204 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3205 status = ICE_ERR_PARAM;
3208 /* AQ call requires hw_vsi_id(s) */
3209 s_rule->pdata.vsi_list.vsi[i] =
3210 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3213 s_rule->type = CPU_TO_LE16(rule_type);
3214 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3215 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3217 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3220 ice_free(hw, s_rule);
3225 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3226 * @hw: pointer to the HW struct
3227 * @vsi_handle_arr: array of VSI handles to form a VSI list
3228 * @num_vsi: number of VSI handles in the array
3229 * @vsi_list_id: stores the ID of the VSI list to be created
3230 * @lkup_type: switch rule filter's lookup type
3232 static enum ice_status
3233 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3234 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3236 enum ice_status status;
3238 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3239 ice_aqc_opc_alloc_res);
3243 /* Update the newly created VSI list to include the specified VSIs */
3244 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3245 *vsi_list_id, false,
3246 ice_aqc_opc_add_sw_rules, lkup_type);
3250 * ice_create_pkt_fwd_rule
3251 * @hw: pointer to the hardware structure
3252 * @recp_list: corresponding filter management list
3253 * @f_entry: entry containing packet forwarding information
3255 * Create switch rule with given filter information and add an entry
3256 * to the corresponding filter management list to track this switch rule
3259 static enum ice_status
3260 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3261 struct ice_fltr_list_entry *f_entry)
3263 struct ice_fltr_mgmt_list_entry *fm_entry;
3264 struct ice_aqc_sw_rules_elem *s_rule;
3265 enum ice_status status;
3267 s_rule = (struct ice_aqc_sw_rules_elem *)
3268 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3270 return ICE_ERR_NO_MEMORY;
3271 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3272 ice_malloc(hw, sizeof(*fm_entry));
3274 status = ICE_ERR_NO_MEMORY;
3275 goto ice_create_pkt_fwd_rule_exit;
3278 fm_entry->fltr_info = f_entry->fltr_info;
3280 /* Initialize all the fields for the management entry */
3281 fm_entry->vsi_count = 1;
3282 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3283 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3284 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3286 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3287 ice_aqc_opc_add_sw_rules);
3289 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3290 ice_aqc_opc_add_sw_rules, NULL);
3292 ice_free(hw, fm_entry);
3293 goto ice_create_pkt_fwd_rule_exit;
3296 f_entry->fltr_info.fltr_rule_id =
3297 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3298 fm_entry->fltr_info.fltr_rule_id =
3299 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3301 /* The book keeping entries will get removed when base driver
3302 * calls remove filter AQ command
3304 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3306 ice_create_pkt_fwd_rule_exit:
3307 ice_free(hw, s_rule);
3312 * ice_update_pkt_fwd_rule
3313 * @hw: pointer to the hardware structure
3314 * @f_info: filter information for switch rule
3316 * Call AQ command to update a previously created switch rule with a
3319 static enum ice_status
3320 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3322 struct ice_aqc_sw_rules_elem *s_rule;
3323 enum ice_status status;
3325 s_rule = (struct ice_aqc_sw_rules_elem *)
3326 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3328 return ICE_ERR_NO_MEMORY;
3330 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3332 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3334 /* Update switch rule with new rule set to forward VSI list */
3335 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3336 ice_aqc_opc_update_sw_rules, NULL);
3338 ice_free(hw, s_rule);
3343 * ice_update_sw_rule_bridge_mode
3344 * @hw: pointer to the HW struct
3346 * Updates unicast switch filter rules based on VEB/VEPA mode
3348 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3350 struct ice_switch_info *sw = hw->switch_info;
3351 struct ice_fltr_mgmt_list_entry *fm_entry;
3352 enum ice_status status = ICE_SUCCESS;
3353 struct LIST_HEAD_TYPE *rule_head;
3354 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3356 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3357 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3359 ice_acquire_lock(rule_lock);
3360 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3362 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3363 u8 *addr = fi->l_data.mac.mac_addr;
3365 /* Update unicast Tx rules to reflect the selected
3368 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3369 (fi->fltr_act == ICE_FWD_TO_VSI ||
3370 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3371 fi->fltr_act == ICE_FWD_TO_Q ||
3372 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3373 status = ice_update_pkt_fwd_rule(hw, fi);
3379 ice_release_lock(rule_lock);
3385 * ice_add_update_vsi_list
3386 * @hw: pointer to the hardware structure
3387 * @m_entry: pointer to current filter management list entry
3388 * @cur_fltr: filter information from the book keeping entry
3389 * @new_fltr: filter information with the new VSI to be added
3391 * Call AQ command to add or update previously created VSI list with new VSI.
3393 * Helper function to do book keeping associated with adding filter information
3394 * The algorithm to do the book keeping is described below :
3395 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3396 * if only one VSI has been added till now
3397 * Allocate a new VSI list and add two VSIs
3398 * to this list using switch rule command
3399 * Update the previously created switch rule with the
3400 * newly created VSI list ID
3401 * if a VSI list was previously created
3402 * Add the new VSI to the previously created VSI list set
3403 * using the update switch rule command
3405 static enum ice_status
3406 ice_add_update_vsi_list(struct ice_hw *hw,
3407 struct ice_fltr_mgmt_list_entry *m_entry,
3408 struct ice_fltr_info *cur_fltr,
3409 struct ice_fltr_info *new_fltr)
3411 enum ice_status status = ICE_SUCCESS;
3412 u16 vsi_list_id = 0;
3414 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3415 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3416 return ICE_ERR_NOT_IMPL;
3418 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3419 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3420 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3421 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3422 return ICE_ERR_NOT_IMPL;
3424 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3425 /* Only one entry existed in the mapping and it was not already
3426 * a part of a VSI list. So, create a VSI list with the old and
3429 struct ice_fltr_info tmp_fltr;
3430 u16 vsi_handle_arr[2];
3432 /* A rule already exists with the new VSI being added */
3433 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3434 return ICE_ERR_ALREADY_EXISTS;
3436 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3437 vsi_handle_arr[1] = new_fltr->vsi_handle;
3438 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3440 new_fltr->lkup_type);
3444 tmp_fltr = *new_fltr;
3445 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3446 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3447 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3448 /* Update the previous switch rule of "MAC forward to VSI" to
3449 * "MAC fwd to VSI list"
3451 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3455 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3456 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3457 m_entry->vsi_list_info =
3458 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3461 /* If this entry was large action then the large action needs
3462 * to be updated to point to FWD to VSI list
3464 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3466 ice_add_marker_act(hw, m_entry,
3467 m_entry->sw_marker_id,
3468 m_entry->lg_act_idx);
3470 u16 vsi_handle = new_fltr->vsi_handle;
3471 enum ice_adminq_opc opcode;
3473 if (!m_entry->vsi_list_info)
3476 /* A rule already exists with the new VSI being added */
3477 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3480 /* Update the previously created VSI list set with
3481 * the new VSI ID passed in
3483 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3484 opcode = ice_aqc_opc_update_sw_rules;
3486 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3487 vsi_list_id, false, opcode,
3488 new_fltr->lkup_type);
3489 /* update VSI list mapping info with new VSI ID */
3491 ice_set_bit(vsi_handle,
3492 m_entry->vsi_list_info->vsi_map);
3495 m_entry->vsi_count++;
3500 * ice_find_rule_entry - Search a rule entry
3501 * @list_head: head of rule list
3502 * @f_info: rule information
3504 * Helper function to search for a given rule entry
3505 * Returns pointer to entry storing the rule if found
3507 static struct ice_fltr_mgmt_list_entry *
3508 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3509 struct ice_fltr_info *f_info)
3511 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3513 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3515 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3516 sizeof(f_info->l_data)) &&
3517 f_info->flag == list_itr->fltr_info.flag) {
3526 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3527 * @recp_list: VSI lists needs to be searched
3528 * @vsi_handle: VSI handle to be found in VSI list
3529 * @vsi_list_id: VSI list ID found containing vsi_handle
3531 * Helper function to search a VSI list with single entry containing given VSI
3532 * handle element. This can be extended further to search VSI list with more
3533 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3535 static struct ice_vsi_list_map_info *
3536 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3539 struct ice_vsi_list_map_info *map_info = NULL;
3540 struct LIST_HEAD_TYPE *list_head;
3542 list_head = &recp_list->filt_rules;
3543 if (recp_list->adv_rule) {
3544 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3546 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3547 ice_adv_fltr_mgmt_list_entry,
3549 if (list_itr->vsi_list_info) {
3550 map_info = list_itr->vsi_list_info;
3551 if (ice_is_bit_set(map_info->vsi_map,
3553 *vsi_list_id = map_info->vsi_list_id;
3559 struct ice_fltr_mgmt_list_entry *list_itr;
3561 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3562 ice_fltr_mgmt_list_entry,
3564 if (list_itr->vsi_count == 1 &&
3565 list_itr->vsi_list_info) {
3566 map_info = list_itr->vsi_list_info;
3567 if (ice_is_bit_set(map_info->vsi_map,
3569 *vsi_list_id = map_info->vsi_list_id;
3579 * ice_add_rule_internal - add rule for a given lookup type
3580 * @hw: pointer to the hardware structure
3581 * @recp_list: recipe list for which rule has to be added
3582 * @lport: logic port number on which function add rule
3583 * @f_entry: structure containing MAC forwarding information
3585 * Adds or updates the rule lists for a given recipe
3587 static enum ice_status
3588 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3589 u8 lport, struct ice_fltr_list_entry *f_entry)
3591 struct ice_fltr_info *new_fltr, *cur_fltr;
3592 struct ice_fltr_mgmt_list_entry *m_entry;
3593 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3594 enum ice_status status = ICE_SUCCESS;
3596 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3597 return ICE_ERR_PARAM;
3599 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3600 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3601 f_entry->fltr_info.fwd_id.hw_vsi_id =
3602 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3604 rule_lock = &recp_list->filt_rule_lock;
3606 ice_acquire_lock(rule_lock);
3607 new_fltr = &f_entry->fltr_info;
3608 if (new_fltr->flag & ICE_FLTR_RX)
3609 new_fltr->src = lport;
3610 else if (new_fltr->flag & ICE_FLTR_TX)
3612 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3614 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3616 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3617 goto exit_add_rule_internal;
3620 cur_fltr = &m_entry->fltr_info;
3621 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3623 exit_add_rule_internal:
3624 ice_release_lock(rule_lock);
3629 * ice_remove_vsi_list_rule
3630 * @hw: pointer to the hardware structure
3631 * @vsi_list_id: VSI list ID generated as part of allocate resource
3632 * @lkup_type: switch rule filter lookup type
3634 * The VSI list should be emptied before this function is called to remove the
3637 static enum ice_status
3638 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3639 enum ice_sw_lkup_type lkup_type)
3641 /* Free the vsi_list resource that we allocated. It is assumed that the
3642 * list is empty at this point.
3644 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3645 ice_aqc_opc_free_res);
3649 * ice_rem_update_vsi_list
3650 * @hw: pointer to the hardware structure
3651 * @vsi_handle: VSI handle of the VSI to remove
3652 * @fm_list: filter management entry for which the VSI list management needs to
3655 static enum ice_status
3656 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3657 struct ice_fltr_mgmt_list_entry *fm_list)
3659 enum ice_sw_lkup_type lkup_type;
3660 enum ice_status status = ICE_SUCCESS;
3663 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3664 fm_list->vsi_count == 0)
3665 return ICE_ERR_PARAM;
3667 /* A rule with the VSI being removed does not exist */
3668 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3669 return ICE_ERR_DOES_NOT_EXIST;
3671 lkup_type = fm_list->fltr_info.lkup_type;
3672 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3673 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3674 ice_aqc_opc_update_sw_rules,
3679 fm_list->vsi_count--;
3680 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3682 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3683 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3684 struct ice_vsi_list_map_info *vsi_list_info =
3685 fm_list->vsi_list_info;
3688 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3690 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3691 return ICE_ERR_OUT_OF_RANGE;
3693 /* Make sure VSI list is empty before removing it below */
3694 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3696 ice_aqc_opc_update_sw_rules,
3701 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3702 tmp_fltr_info.fwd_id.hw_vsi_id =
3703 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3704 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3705 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3707 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3708 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3712 fm_list->fltr_info = tmp_fltr_info;
3715 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3716 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3717 struct ice_vsi_list_map_info *vsi_list_info =
3718 fm_list->vsi_list_info;
3720 /* Remove the VSI list since it is no longer used */
3721 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3723 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3724 vsi_list_id, status);
3728 LIST_DEL(&vsi_list_info->list_entry);
3729 ice_free(hw, vsi_list_info);
3730 fm_list->vsi_list_info = NULL;
3737 * ice_remove_rule_internal - Remove a filter rule of a given type
3739 * @hw: pointer to the hardware structure
3740 * @recp_list: recipe list for which the rule needs to removed
3741 * @f_entry: rule entry containing filter information
3743 static enum ice_status
3744 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3745 struct ice_fltr_list_entry *f_entry)
3747 struct ice_fltr_mgmt_list_entry *list_elem;
3748 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3749 enum ice_status status = ICE_SUCCESS;
3750 bool remove_rule = false;
3753 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3754 return ICE_ERR_PARAM;
3755 f_entry->fltr_info.fwd_id.hw_vsi_id =
3756 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3758 rule_lock = &recp_list->filt_rule_lock;
3759 ice_acquire_lock(rule_lock);
3760 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3761 &f_entry->fltr_info);
3763 status = ICE_ERR_DOES_NOT_EXIST;
3767 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3769 } else if (!list_elem->vsi_list_info) {
3770 status = ICE_ERR_DOES_NOT_EXIST;
3772 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3773 /* a ref_cnt > 1 indicates that the vsi_list is being
3774 * shared by multiple rules. Decrement the ref_cnt and
3775 * remove this rule, but do not modify the list, as it
3776 * is in-use by other rules.
3778 list_elem->vsi_list_info->ref_cnt--;
3781 /* a ref_cnt of 1 indicates the vsi_list is only used
3782 * by one rule. However, the original removal request is only
3783 * for a single VSI. Update the vsi_list first, and only
3784 * remove the rule if there are no further VSIs in this list.
3786 vsi_handle = f_entry->fltr_info.vsi_handle;
3787 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3790 /* if VSI count goes to zero after updating the VSI list */
3791 if (list_elem->vsi_count == 0)
3796 /* Remove the lookup rule */
3797 struct ice_aqc_sw_rules_elem *s_rule;
3799 s_rule = (struct ice_aqc_sw_rules_elem *)
3800 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3802 status = ICE_ERR_NO_MEMORY;
3806 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3807 ice_aqc_opc_remove_sw_rules);
3809 status = ice_aq_sw_rules(hw, s_rule,
3810 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3811 ice_aqc_opc_remove_sw_rules, NULL);
3813 /* Remove a book keeping from the list */
3814 ice_free(hw, s_rule);
3819 LIST_DEL(&list_elem->list_entry);
3820 ice_free(hw, list_elem);
3823 ice_release_lock(rule_lock);
3828 * ice_aq_get_res_alloc - get allocated resources
3829 * @hw: pointer to the HW struct
3830 * @num_entries: pointer to u16 to store the number of resource entries returned
3831 * @buf: pointer to buffer
3832 * @buf_size: size of buf
3833 * @cd: pointer to command details structure or NULL
3835 * The caller-supplied buffer must be large enough to store the resource
3836 * information for all resource types. Each resource type is an
3837 * ice_aqc_get_res_resp_elem structure.
3840 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3841 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3842 struct ice_sq_cd *cd)
3844 struct ice_aqc_get_res_alloc *resp;
3845 enum ice_status status;
3846 struct ice_aq_desc desc;
3849 return ICE_ERR_BAD_PTR;
3851 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3852 return ICE_ERR_INVAL_SIZE;
3854 resp = &desc.params.get_res;
3856 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3857 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3859 if (!status && num_entries)
3860 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3866 * ice_aq_get_res_descs - get allocated resource descriptors
3867 * @hw: pointer to the hardware structure
3868 * @num_entries: number of resource entries in buffer
3869 * @buf: structure to hold response data buffer
3870 * @buf_size: size of buffer
3871 * @res_type: resource type
3872 * @res_shared: is resource shared
3873 * @desc_id: input - first desc ID to start; output - next desc ID
3874 * @cd: pointer to command details structure or NULL
3877 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3878 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3879 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3881 struct ice_aqc_get_allocd_res_desc *cmd;
3882 struct ice_aq_desc desc;
3883 enum ice_status status;
3885 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3887 cmd = &desc.params.get_res_desc;
3890 return ICE_ERR_PARAM;
3892 if (buf_size != (num_entries * sizeof(*buf)))
3893 return ICE_ERR_PARAM;
3895 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3897 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3898 ICE_AQC_RES_TYPE_M) | (res_shared ?
3899 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3900 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3902 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3904 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3910 * ice_add_mac_rule - Add a MAC address based filter rule
3911 * @hw: pointer to the hardware structure
3912 * @m_list: list of MAC addresses and forwarding information
3913 * @sw: pointer to switch info struct for which function add rule
3914 * @lport: logic port number on which function add rule
3916 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3917 * multiple unicast addresses, the function assumes that all the
3918 * addresses are unique in a given add_mac call. It doesn't
3919 * check for duplicates in this case, removing duplicates from a given
3920 * list should be taken care of in the caller of this function.
3922 static enum ice_status
3923 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3924 struct ice_switch_info *sw, u8 lport)
3926 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3927 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3928 struct ice_fltr_list_entry *m_list_itr;
3929 struct LIST_HEAD_TYPE *rule_head;
3930 u16 total_elem_left, s_rule_size;
3931 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3932 enum ice_status status = ICE_SUCCESS;
3933 u16 num_unicast = 0;
3937 rule_lock = &recp_list->filt_rule_lock;
3938 rule_head = &recp_list->filt_rules;
3940 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3942 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3946 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3947 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3948 if (!ice_is_vsi_valid(hw, vsi_handle))
3949 return ICE_ERR_PARAM;
3950 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3951 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3952 /* update the src in case it is VSI num */
3953 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3954 return ICE_ERR_PARAM;
3955 m_list_itr->fltr_info.src = hw_vsi_id;
3956 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3957 IS_ZERO_ETHER_ADDR(add))
3958 return ICE_ERR_PARAM;
3959 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3960 /* Don't overwrite the unicast address */
3961 ice_acquire_lock(rule_lock);
3962 if (ice_find_rule_entry(rule_head,
3963 &m_list_itr->fltr_info)) {
3964 ice_release_lock(rule_lock);
3965 return ICE_ERR_ALREADY_EXISTS;
3967 ice_release_lock(rule_lock);
3969 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3970 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3971 m_list_itr->status =
3972 ice_add_rule_internal(hw, recp_list, lport,
3974 if (m_list_itr->status)
3975 return m_list_itr->status;
3979 ice_acquire_lock(rule_lock);
3980 /* Exit if no suitable entries were found for adding bulk switch rule */
3982 status = ICE_SUCCESS;
3983 goto ice_add_mac_exit;
3986 /* Allocate switch rule buffer for the bulk update for unicast */
3987 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3988 s_rule = (struct ice_aqc_sw_rules_elem *)
3989 ice_calloc(hw, num_unicast, s_rule_size);
3991 status = ICE_ERR_NO_MEMORY;
3992 goto ice_add_mac_exit;
3996 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3998 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3999 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4001 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4002 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4003 ice_aqc_opc_add_sw_rules);
4004 r_iter = (struct ice_aqc_sw_rules_elem *)
4005 ((u8 *)r_iter + s_rule_size);
4009 /* Call AQ bulk switch rule update for all unicast addresses */
4011 /* Call AQ switch rule in AQ_MAX chunk */
4012 for (total_elem_left = num_unicast; total_elem_left > 0;
4013 total_elem_left -= elem_sent) {
4014 struct ice_aqc_sw_rules_elem *entry = r_iter;
4016 elem_sent = MIN_T(u8, total_elem_left,
4017 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4018 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4019 elem_sent, ice_aqc_opc_add_sw_rules,
4022 goto ice_add_mac_exit;
4023 r_iter = (struct ice_aqc_sw_rules_elem *)
4024 ((u8 *)r_iter + (elem_sent * s_rule_size));
4027 /* Fill up rule ID based on the value returned from FW */
4029 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4031 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4032 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4033 struct ice_fltr_mgmt_list_entry *fm_entry;
4035 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4036 f_info->fltr_rule_id =
4037 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4038 f_info->fltr_act = ICE_FWD_TO_VSI;
4039 /* Create an entry to track this MAC address */
4040 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4041 ice_malloc(hw, sizeof(*fm_entry));
4043 status = ICE_ERR_NO_MEMORY;
4044 goto ice_add_mac_exit;
4046 fm_entry->fltr_info = *f_info;
4047 fm_entry->vsi_count = 1;
4048 /* The book keeping entries will get removed when
4049 * base driver calls remove filter AQ command
4052 LIST_ADD(&fm_entry->list_entry, rule_head);
4053 r_iter = (struct ice_aqc_sw_rules_elem *)
4054 ((u8 *)r_iter + s_rule_size);
4059 ice_release_lock(rule_lock);
4061 ice_free(hw, s_rule);
4066 * ice_add_mac - Add a MAC address based filter rule
4067 * @hw: pointer to the hardware structure
4068 * @m_list: list of MAC addresses and forwarding information
4070 * Function add MAC rule for logical port from HW struct
4072 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4075 return ICE_ERR_PARAM;
4077 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4078 hw->port_info->lport);
4082 * ice_add_vlan_internal - Add one VLAN based filter rule
4083 * @hw: pointer to the hardware structure
4084 * @recp_list: recipe list for which rule has to be added
4085 * @f_entry: filter entry containing one VLAN information
4087 static enum ice_status
4088 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4089 struct ice_fltr_list_entry *f_entry)
4091 struct ice_fltr_mgmt_list_entry *v_list_itr;
4092 struct ice_fltr_info *new_fltr, *cur_fltr;
4093 enum ice_sw_lkup_type lkup_type;
4094 u16 vsi_list_id = 0, vsi_handle;
4095 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4096 enum ice_status status = ICE_SUCCESS;
4098 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4099 return ICE_ERR_PARAM;
4101 f_entry->fltr_info.fwd_id.hw_vsi_id =
4102 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4103 new_fltr = &f_entry->fltr_info;
4105 /* VLAN ID should only be 12 bits */
4106 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4107 return ICE_ERR_PARAM;
4109 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4110 return ICE_ERR_PARAM;
4112 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4113 lkup_type = new_fltr->lkup_type;
4114 vsi_handle = new_fltr->vsi_handle;
4115 rule_lock = &recp_list->filt_rule_lock;
4116 ice_acquire_lock(rule_lock);
4117 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4119 struct ice_vsi_list_map_info *map_info = NULL;
4121 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4122 /* All VLAN pruning rules use a VSI list. Check if
4123 * there is already a VSI list containing VSI that we
4124 * want to add. If found, use the same vsi_list_id for
4125 * this new VLAN rule or else create a new list.
4127 map_info = ice_find_vsi_list_entry(recp_list,
4131 status = ice_create_vsi_list_rule(hw,
4139 /* Convert the action to forwarding to a VSI list. */
4140 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4141 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4144 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4146 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4149 status = ICE_ERR_DOES_NOT_EXIST;
4152 /* reuse VSI list for new rule and increment ref_cnt */
4154 v_list_itr->vsi_list_info = map_info;
4155 map_info->ref_cnt++;
4157 v_list_itr->vsi_list_info =
4158 ice_create_vsi_list_map(hw, &vsi_handle,
4162 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4163 /* Update existing VSI list to add new VSI ID only if it used
4166 cur_fltr = &v_list_itr->fltr_info;
4167 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4170 /* If VLAN rule exists and VSI list being used by this rule is
4171 * referenced by more than 1 VLAN rule. Then create a new VSI
4172 * list appending previous VSI with new VSI and update existing
4173 * VLAN rule to point to new VSI list ID
4175 struct ice_fltr_info tmp_fltr;
4176 u16 vsi_handle_arr[2];
4179 /* Current implementation only supports reusing VSI list with
4180 * one VSI count. We should never hit below condition
4182 if (v_list_itr->vsi_count > 1 &&
4183 v_list_itr->vsi_list_info->ref_cnt > 1) {
4184 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4185 status = ICE_ERR_CFG;
4190 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4193 /* A rule already exists with the new VSI being added */
4194 if (cur_handle == vsi_handle) {
4195 status = ICE_ERR_ALREADY_EXISTS;
4199 vsi_handle_arr[0] = cur_handle;
4200 vsi_handle_arr[1] = vsi_handle;
4201 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4202 &vsi_list_id, lkup_type);
4206 tmp_fltr = v_list_itr->fltr_info;
4207 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4208 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4209 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4210 /* Update the previous switch rule to a new VSI list which
4211 * includes current VSI that is requested
4213 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4217 /* before overriding VSI list map info. decrement ref_cnt of
4220 v_list_itr->vsi_list_info->ref_cnt--;
4222 /* now update to newly created list */
4223 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4224 v_list_itr->vsi_list_info =
4225 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4227 v_list_itr->vsi_count++;
4231 ice_release_lock(rule_lock);
4236 * ice_add_vlan_rule - Add VLAN based filter rule
4237 * @hw: pointer to the hardware structure
4238 * @v_list: list of VLAN entries and forwarding information
4239 * @sw: pointer to switch info struct for which function add rule
4241 static enum ice_status
4242 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4243 struct ice_switch_info *sw)
4245 struct ice_fltr_list_entry *v_list_itr;
4246 struct ice_sw_recipe *recp_list;
4248 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4249 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4251 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4252 return ICE_ERR_PARAM;
4253 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4254 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4256 if (v_list_itr->status)
4257 return v_list_itr->status;
4263 * ice_add_vlan - Add a VLAN based filter rule
4264 * @hw: pointer to the hardware structure
4265 * @v_list: list of VLAN and forwarding information
4267 * Function add VLAN rule for logical port from HW struct
4269 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4272 return ICE_ERR_PARAM;
4274 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4278 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4279 * @hw: pointer to the hardware structure
4280 * @mv_list: list of MAC and VLAN filters
4281 * @sw: pointer to switch info struct for which function add rule
4282 * @lport: logic port number on which function add rule
4284 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4285 * pruning bits enabled, then it is the responsibility of the caller to make
4286 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4287 * VLAN won't be received on that VSI otherwise.
4289 static enum ice_status
4290 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4291 struct ice_switch_info *sw, u8 lport)
4293 struct ice_fltr_list_entry *mv_list_itr;
4294 struct ice_sw_recipe *recp_list;
4296 if (!mv_list || !hw)
4297 return ICE_ERR_PARAM;
4299 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4300 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4302 enum ice_sw_lkup_type l_type =
4303 mv_list_itr->fltr_info.lkup_type;
4305 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4306 return ICE_ERR_PARAM;
4307 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4308 mv_list_itr->status =
4309 ice_add_rule_internal(hw, recp_list, lport,
4311 if (mv_list_itr->status)
4312 return mv_list_itr->status;
4318 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4319 * @hw: pointer to the hardware structure
4320 * @mv_list: list of MAC VLAN addresses and forwarding information
4322 * Function add MAC VLAN rule for logical port from HW struct
4325 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4327 if (!mv_list || !hw)
4328 return ICE_ERR_PARAM;
4330 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4331 hw->port_info->lport);
4335 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4336 * @hw: pointer to the hardware structure
4337 * @em_list: list of ether type MAC filter, MAC is optional
4338 * @sw: pointer to switch info struct for which function add rule
4339 * @lport: logic port number on which function add rule
4341 * This function requires the caller to populate the entries in
4342 * the filter list with the necessary fields (including flags to
4343 * indicate Tx or Rx rules).
4345 static enum ice_status
4346 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4347 struct ice_switch_info *sw, u8 lport)
4349 struct ice_fltr_list_entry *em_list_itr;
4351 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4353 struct ice_sw_recipe *recp_list;
4354 enum ice_sw_lkup_type l_type;
4356 l_type = em_list_itr->fltr_info.lkup_type;
4357 recp_list = &sw->recp_list[l_type];
4359 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4360 l_type != ICE_SW_LKUP_ETHERTYPE)
4361 return ICE_ERR_PARAM;
4363 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4366 if (em_list_itr->status)
4367 return em_list_itr->status;
4373 * ice_add_eth_mac - Add a ethertype based filter rule
4374 * @hw: pointer to the hardware structure
4375 * @em_list: list of ethertype and forwarding information
4377 * Function add ethertype rule for logical port from HW struct
4380 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4382 if (!em_list || !hw)
4383 return ICE_ERR_PARAM;
4385 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4386 hw->port_info->lport);
4390 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4391 * @hw: pointer to the hardware structure
4392 * @em_list: list of ethertype or ethertype MAC entries
4393 * @sw: pointer to switch info struct for which function add rule
4395 static enum ice_status
4396 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4397 struct ice_switch_info *sw)
4399 struct ice_fltr_list_entry *em_list_itr, *tmp;
4401 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4403 struct ice_sw_recipe *recp_list;
4404 enum ice_sw_lkup_type l_type;
4406 l_type = em_list_itr->fltr_info.lkup_type;
4408 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4409 l_type != ICE_SW_LKUP_ETHERTYPE)
4410 return ICE_ERR_PARAM;
4412 recp_list = &sw->recp_list[l_type];
4413 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4415 if (em_list_itr->status)
4416 return em_list_itr->status;
4422 * ice_remove_eth_mac - remove a ethertype based filter rule
4423 * @hw: pointer to the hardware structure
4424 * @em_list: list of ethertype and forwarding information
4428 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4430 if (!em_list || !hw)
4431 return ICE_ERR_PARAM;
4433 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4437 * ice_rem_sw_rule_info
4438 * @hw: pointer to the hardware structure
4439 * @rule_head: pointer to the switch list structure that we want to delete
4442 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4444 if (!LIST_EMPTY(rule_head)) {
4445 struct ice_fltr_mgmt_list_entry *entry;
4446 struct ice_fltr_mgmt_list_entry *tmp;
4448 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4449 ice_fltr_mgmt_list_entry, list_entry) {
4450 LIST_DEL(&entry->list_entry);
4451 ice_free(hw, entry);
4457 * ice_rem_adv_rule_info
4458 * @hw: pointer to the hardware structure
4459 * @rule_head: pointer to the switch list structure that we want to delete
4462 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4464 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4465 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4467 if (LIST_EMPTY(rule_head))
4470 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4471 ice_adv_fltr_mgmt_list_entry, list_entry) {
4472 LIST_DEL(&lst_itr->list_entry);
4473 ice_free(hw, lst_itr->lkups);
4474 ice_free(hw, lst_itr);
4479 * ice_rem_all_sw_rules_info
4480 * @hw: pointer to the hardware structure
4482 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4484 struct ice_switch_info *sw = hw->switch_info;
4487 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4488 struct LIST_HEAD_TYPE *rule_head;
4490 rule_head = &sw->recp_list[i].filt_rules;
4491 if (!sw->recp_list[i].adv_rule)
4492 ice_rem_sw_rule_info(hw, rule_head);
4494 ice_rem_adv_rule_info(hw, rule_head);
4495 if (sw->recp_list[i].adv_rule &&
4496 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4497 sw->recp_list[i].adv_rule = false;
4502 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4503 * @pi: pointer to the port_info structure
4504 * @vsi_handle: VSI handle to set as default
4505 * @set: true to add the above mentioned switch rule, false to remove it
4506 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4508 * add filter rule to set/unset given VSI as default VSI for the switch
4509 * (represented by swid)
4512 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4515 struct ice_aqc_sw_rules_elem *s_rule;
4516 struct ice_fltr_info f_info;
4517 struct ice_hw *hw = pi->hw;
4518 enum ice_adminq_opc opcode;
4519 enum ice_status status;
4523 if (!ice_is_vsi_valid(hw, vsi_handle))
4524 return ICE_ERR_PARAM;
4525 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4527 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4528 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4529 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4531 return ICE_ERR_NO_MEMORY;
4533 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4535 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4536 f_info.flag = direction;
4537 f_info.fltr_act = ICE_FWD_TO_VSI;
4538 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4540 if (f_info.flag & ICE_FLTR_RX) {
4541 f_info.src = pi->lport;
4542 f_info.src_id = ICE_SRC_ID_LPORT;
4544 f_info.fltr_rule_id =
4545 pi->dflt_rx_vsi_rule_id;
4546 } else if (f_info.flag & ICE_FLTR_TX) {
4547 f_info.src_id = ICE_SRC_ID_VSI;
4548 f_info.src = hw_vsi_id;
4550 f_info.fltr_rule_id =
4551 pi->dflt_tx_vsi_rule_id;
4555 opcode = ice_aqc_opc_add_sw_rules;
4557 opcode = ice_aqc_opc_remove_sw_rules;
4559 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4561 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4562 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4565 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4567 if (f_info.flag & ICE_FLTR_TX) {
4568 pi->dflt_tx_vsi_num = hw_vsi_id;
4569 pi->dflt_tx_vsi_rule_id = index;
4570 } else if (f_info.flag & ICE_FLTR_RX) {
4571 pi->dflt_rx_vsi_num = hw_vsi_id;
4572 pi->dflt_rx_vsi_rule_id = index;
4575 if (f_info.flag & ICE_FLTR_TX) {
4576 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4577 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4578 } else if (f_info.flag & ICE_FLTR_RX) {
4579 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4580 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4585 ice_free(hw, s_rule);
4590 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4591 * @list_head: head of rule list
4592 * @f_info: rule information
4594 * Helper function to search for a unicast rule entry - this is to be used
4595 * to remove unicast MAC filter that is not shared with other VSIs on the
4598 * Returns pointer to entry storing the rule if found
4600 static struct ice_fltr_mgmt_list_entry *
4601 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4602 struct ice_fltr_info *f_info)
/* Linear scan of the managed-rule list for a unicast entry matching on
 * three keys: the raw lookup data (MAC bytes), the destination HW VSI
 * id, and the Rx/Tx direction flag. All three must match for a hit.
 * NOTE(review): the loop-body return and the trailing "not found"
 * return are elided in this view.
 */
4604 struct ice_fltr_mgmt_list_entry *list_itr;
4606 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Cheap memcmp on the lookup payload first, then the two scalar keys */
4608 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4609 sizeof(f_info->l_data)) &&
4610 f_info->fwd_id.hw_vsi_id ==
4611 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4612 f_info->flag == list_itr->fltr_info.flag)
4619 * ice_remove_mac_rule - remove a MAC based filter rule
4620 * @hw: pointer to the hardware structure
4621 * @m_list: list of MAC addresses and forwarding information
4622 * @recp_list: list from which function remove MAC address
4624 * This function removes either a MAC filter rule or a specific VSI from a
4625 * VSI list for a multicast MAC address.
4627 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4628 * ice_add_mac. Caller should be aware that this call will only work if all
4629 * the entries passed into m_list were added previously. It will not attempt to
4630 * do a partial remove of entries that were found.
4632 static enum ice_status
4633 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4634 struct ice_sw_recipe *recp_list)
4636 struct ice_fltr_list_entry *list_itr, *tmp;
4637 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* NOTE(review): the condition guarding this early return (presumably a
 * NULL m_list check) is elided in this view.
 */
4640 return ICE_ERR_PARAM;
4642 rule_lock = &recp_list->filt_rule_lock;
/* Validate and remove each list entry; fails fast on the first bad
 * entry or removal error, with no rollback of earlier removals (see
 * the function header: no partial-remove recovery is attempted).
 */
4643 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4645 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4646 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
/* Every entry on a MAC removal list must be a MAC lookup */
4649 if (l_type != ICE_SW_LKUP_MAC)
4650 return ICE_ERR_PARAM;
4652 vsi_handle = list_itr->fltr_info.vsi_handle;
4653 if (!ice_is_vsi_valid(hw, vsi_handle))
4654 return ICE_ERR_PARAM;
/* Translate the software VSI handle to the HW VSI number the rule
 * was programmed with.
 */
4656 list_itr->fltr_info.fwd_id.hw_vsi_id =
4657 ice_get_hw_vsi_num(hw, vsi_handle);
/* When unicast filters are not shared, only remove the rule if this
 * exact (MAC, HW VSI, flag) entry exists -- i.e. it belongs to us.
 */
4658 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4659 /* Don't remove the unicast address that belongs to
4660 * another VSI on the switch, since it is not being
4663 ice_acquire_lock(rule_lock);
4664 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4665 &list_itr->fltr_info)) {
4666 ice_release_lock(rule_lock);
4667 return ICE_ERR_DOES_NOT_EXIST;
4669 ice_release_lock(rule_lock);
/* Per-entry status is recorded on the entry as well as returned */
4671 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4673 if (list_itr->status)
4674 return list_itr->status;
4680 * ice_remove_mac - remove a MAC address based filter rule
4681 * @hw: pointer to the hardware structure
4682 * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: resolve the MAC recipe from the switch info and
 * delegate to ice_remove_mac_rule().
 */
4685 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4687 struct ice_sw_recipe *recp_list;
4689 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4690 return ice_remove_mac_rule(hw, m_list, recp_list);
4694 * ice_remove_vlan_rule - Remove VLAN based filter rule
4695 * @hw: pointer to the hardware structure
4696 * @v_list: list of VLAN entries and forwarding information
4697 * @recp_list: list from which function remove VLAN
4699 static enum ice_status
4700 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4701 struct ice_sw_recipe *recp_list)
4703 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* Remove each VLAN entry; every entry must be a VLAN lookup, and the
 * function returns on the first failure without rolling back.
 */
4705 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4707 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4709 if (l_type != ICE_SW_LKUP_VLAN)
4710 return ICE_ERR_PARAM;
4711 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4713 if (v_list_itr->status)
4714 return v_list_itr->status;
4720 * ice_remove_vlan - remove a VLAN address based filter rule
4721 * @hw: pointer to the hardware structure
4722 * @v_list: list of VLAN and forwarding information
/* Public wrapper: validate args, resolve the VLAN recipe and delegate
 * to ice_remove_vlan_rule().
 */
4726 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4728 struct ice_sw_recipe *recp_list;
/* NOTE(review): the condition guarding this ICE_ERR_PARAM return
 * (presumably !v_list || !hw) is elided in this view.
 */
4731 return ICE_ERR_PARAM;
4733 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4734 return ice_remove_vlan_rule(hw, v_list, recp_list);
4738 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4739 * @hw: pointer to the hardware structure
4740 * @v_list: list of MAC VLAN entries and forwarding information
4741 * @recp_list: list from which function remove MAC VLAN
4743 static enum ice_status
4744 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4745 struct ice_sw_recipe *recp_list)
4747 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the caller-supplied recp_list is unconditionally
 * overwritten here with the MAC_VLAN recipe, so the parameter is
 * effectively ignored -- confirm whether this is intentional.
 */
4749 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Remove each MAC+VLAN entry; fail fast on type mismatch or error */
4750 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4752 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4754 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4755 return ICE_ERR_PARAM;
4756 v_list_itr->status =
4757 ice_remove_rule_internal(hw, recp_list,
4759 if (v_list_itr->status)
4760 return v_list_itr->status;
4766 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4767 * @hw: pointer to the hardware structure
4768 * @mv_list: list of MAC VLAN and forwarding information
/* Public wrapper: validate args, resolve the MAC_VLAN recipe and
 * delegate to ice_remove_mac_vlan_rule().
 */
4771 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4773 struct ice_sw_recipe *recp_list;
4775 if (!mv_list || !hw)
4776 return ICE_ERR_PARAM;
4778 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4779 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4783 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4784 * @fm_entry: filter entry to inspect
4785 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter applies to vsi_handle: either it forwards
 * directly to that VSI, or it forwards to a VSI list whose bitmap has
 * the VSI's bit set.
 */
4788 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4790 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4791 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4792 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
/* NOTE(review): vsi_list_info is dereferenced without a NULL check;
 * callers presumably guarantee it is set for FWD_TO_VSI_LIST rules.
 */
4793 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4798 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4799 * @hw: pointer to the hardware structure
4800 * @vsi_handle: VSI handle to remove filters from
4801 * @vsi_list_head: pointer to the list to add entry to
4802 * @fi: pointer to fltr_info of filter entry to copy & add
4804 * Helper function, used when creating a list of filters to remove from
4805 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4806 * original filter entry, with the exception of fltr_info.fltr_act and
4807 * fltr_info.fwd_id fields. These are set such that later logic can
4808 * extract which VSI to remove the fltr from, and pass on that information.
4810 static enum ice_status
4811 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4812 struct LIST_HEAD_TYPE *vsi_list_head,
4813 struct ice_fltr_info *fi)
4815 struct ice_fltr_list_entry *tmp;
4817 /* this memory is freed up in the caller function
4818 * once filters for this VSI are removed
/* NOTE(review): the NULL-check condition before this return is elided
 * in this view.
 */
4820 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4822 return ICE_ERR_NO_MEMORY;
/* Copy the whole filter info, then retarget it at the given VSI */
4824 tmp->fltr_info = *fi;
4826 /* Overwrite these fields to indicate which VSI to remove filter from,
4827 * so find and remove logic can extract the information from the
4828 * list entries. Note that original entries will still have proper
4831 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4832 tmp->fltr_info.vsi_handle = vsi_handle;
4833 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4835 LIST_ADD(&tmp->list_entry, vsi_list_head);
4841 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4842 * @hw: pointer to the hardware structure
4843 * @vsi_handle: VSI handle to remove filters from
4844 * @lkup_list_head: pointer to the list that has certain lookup type filters
4845 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4847 * Locates all filters in lkup_list_head that are used by the given VSI,
4848 * and adds COPIES of those entries to vsi_list_head (intended to be used
4849 * to remove the listed filters).
4850 * Note that this means all entries in vsi_list_head must be explicitly
4851 * deallocated by the caller when done with list.
4853 static enum ice_status
4854 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4855 struct LIST_HEAD_TYPE *lkup_list_head,
4856 struct LIST_HEAD_TYPE *vsi_list_head)
4858 struct ice_fltr_mgmt_list_entry *fm_entry;
4859 enum ice_status status = ICE_SUCCESS;
4861 /* check to make sure VSI ID is valid and within boundary */
4862 if (!ice_is_vsi_valid(hw, vsi_handle))
4863 return ICE_ERR_PARAM;
/* Copy every filter that the VSI uses onto vsi_list_head; the caller
 * owns (and must free) the copies.
 */
4865 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4866 ice_fltr_mgmt_list_entry, list_entry) {
4867 struct ice_fltr_info *fi;
4869 fi = &fm_entry->fltr_info;
/* Skip entries that do not apply to this VSI */
4870 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4873 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4882 * ice_determine_promisc_mask
4883 * @fi: filter info to parse
4885 * Helper function to determine which ICE_PROMISC_ mask corresponds
4886 * to given filter info.
4888 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4890 u16 vid = fi->l_data.mac_vlan.vlan_id;
4891 u8 *macaddr = fi->l_data.mac.mac_addr;
4892 bool is_tx_fltr = false;
4893 u8 promisc_mask = 0;
/* Direction selects the _TX vs _RX variant of each promisc bit.
 * NOTE(review): the line setting is_tx_fltr = true is elided here.
 */
4895 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC: broadcast, multicast, then unicast */
4898 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4899 promisc_mask |= is_tx_fltr ?
4900 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4901 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4902 promisc_mask |= is_tx_fltr ?
4903 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4904 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4905 promisc_mask |= is_tx_fltr ?
4906 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the condition for the VLAN branch (presumably
 * "if (vid)") is elided in this view.
 */
4908 promisc_mask |= is_tx_fltr ?
4909 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4911 return promisc_mask;
4915 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4916 * @hw: pointer to the hardware structure
4917 * @vsi_handle: VSI handle to retrieve info from
4918 * @promisc_mask: pointer to mask to be filled in
4919 * @vid: VLAN ID of promisc VLAN VSI
4920 * @sw: pointer to switch info struct for which function add rule
4922 static enum ice_status
4923 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4924 u16 *vid, struct ice_switch_info *sw)
4926 struct ice_fltr_mgmt_list_entry *itr;
4927 struct LIST_HEAD_TYPE *rule_head;
4928 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4930 if (!ice_is_vsi_valid(hw, vsi_handle))
4931 return ICE_ERR_PARAM;
/* Accumulate the promisc mask from all PROMISC rules that apply to
 * this VSI, under the recipe's rule-list lock.
 */
4935 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4936 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4938 ice_acquire_lock(rule_lock);
4939 LIST_FOR_EACH_ENTRY(itr, rule_head,
4940 ice_fltr_mgmt_list_entry, list_entry) {
4941 /* Continue if this filter doesn't apply to this VSI or the
4942 * VSI ID is not in the VSI map for this filter
4944 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4947 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4949 ice_release_lock(rule_lock);
4955 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4956 * @hw: pointer to the hardware structure
4957 * @vsi_handle: VSI handle to retrieve info from
4958 * @promisc_mask: pointer to mask to be filled in
4959 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper over _ice_get_vsi_promisc() using hw->switch_info */
4962 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4965 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4966 vid, hw->switch_info);
4970 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4971 * @hw: pointer to the hardware structure
4972 * @vsi_handle: VSI handle to retrieve info from
4973 * @promisc_mask: pointer to mask to be filled in
4974 * @vid: VLAN ID of promisc VLAN VSI
4975 * @sw: pointer to switch info struct for which function add rule
4977 static enum ice_status
4978 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4979 u16 *vid, struct ice_switch_info *sw)
4981 struct ice_fltr_mgmt_list_entry *itr;
4982 struct LIST_HEAD_TYPE *rule_head;
4983 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4985 if (!ice_is_vsi_valid(hw, vsi_handle))
4986 return ICE_ERR_PARAM;
/* Same shape as _ice_get_vsi_promisc() but walks the PROMISC_VLAN
 * recipe's rule list instead.
 */
4990 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4991 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4993 ice_acquire_lock(rule_lock);
4994 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4996 /* Continue if this filter doesn't apply to this VSI or the
4997 * VSI ID is not in the VSI map for this filter
4999 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5002 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5004 ice_release_lock(rule_lock);
5010 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5011 * @hw: pointer to the hardware structure
5012 * @vsi_handle: VSI handle to retrieve info from
5013 * @promisc_mask: pointer to mask to be filled in
5014 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper over _ice_get_vsi_vlan_promisc() using hw->switch_info */
5017 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5020 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5021 vid, hw->switch_info);
5025 * ice_remove_promisc - Remove promisc based filter rules
5026 * @hw: pointer to the hardware structure
5027 * @recp_id: recipe ID for which the rule needs to be removed
5028 * @v_list: list of promisc entries
5030 static enum ice_status
5031 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5032 struct LIST_HEAD_TYPE *v_list)
5034 struct ice_fltr_list_entry *v_list_itr, *tmp;
5035 struct ice_sw_recipe *recp_list;
/* recp_id selects PROMISC or PROMISC_VLAN recipe; each entry is
 * removed individually, failing fast on the first error.
 */
5037 recp_list = &hw->switch_info->recp_list[recp_id];
5038 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5040 v_list_itr->status =
5041 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5042 if (v_list_itr->status)
5043 return v_list_itr->status;
5049 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5050 * @hw: pointer to the hardware structure
5051 * @vsi_handle: VSI handle to clear mode
5052 * @promisc_mask: mask of promiscuous config bits to clear
5053 * @vid: VLAN ID to clear VLAN promiscuous
5054 * @sw: pointer to switch info struct for which function add rule
5056 static enum ice_status
5057 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5058 u16 vid, struct ice_switch_info *sw)
5060 struct ice_fltr_list_entry *fm_entry, *tmp;
5061 struct LIST_HEAD_TYPE remove_list_head;
5062 struct ice_fltr_mgmt_list_entry *itr;
5063 struct LIST_HEAD_TYPE *rule_head;
5064 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5065 enum ice_status status = ICE_SUCCESS;
5068 if (!ice_is_vsi_valid(hw, vsi_handle))
5069 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; everything else
 * clears plain PROMISC rules.
 */
5071 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5072 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5074 recipe_id = ICE_SW_LKUP_PROMISC;
5076 rule_head = &sw->recp_list[recipe_id].filt_rules;
5077 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5079 INIT_LIST_HEAD(&remove_list_head);
/* Phase 1 (under lock): collect COPIES of the matching rules onto a
 * private remove list.
 */
5081 ice_acquire_lock(rule_lock);
5082 LIST_FOR_EACH_ENTRY(itr, rule_head,
5083 ice_fltr_mgmt_list_entry, list_entry) {
5084 struct ice_fltr_info *fltr_info;
5085 u8 fltr_promisc_mask = 0;
5087 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5089 fltr_info = &itr->fltr_info;
/* For VLAN promisc rules, only the requested VLAN ID is cleared */
5091 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5092 vid != fltr_info->l_data.mac_vlan.vlan_id)
5095 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5097 /* Skip if filter is not completely specified by given mask */
5098 if (fltr_promisc_mask & ~promisc_mask)
5101 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5105 ice_release_lock(rule_lock);
5106 goto free_fltr_list;
5109 ice_release_lock(rule_lock);
/* Phase 2 (lock dropped): remove the collected rules via AQ */
5111 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the copies made in phase 1, regardless of outcome */
5114 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5115 ice_fltr_list_entry, list_entry) {
5116 LIST_DEL(&fm_entry->list_entry);
5117 ice_free(hw, fm_entry);
5124 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5125 * @hw: pointer to the hardware structure
5126 * @vsi_handle: VSI handle to clear mode
5127 * @promisc_mask: mask of promiscuous config bits to clear
5128 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper over _ice_clear_vsi_promisc() using hw->switch_info */
5131 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5132 u8 promisc_mask, u16 vid)
5134 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5135 vid, hw->switch_info);
5139 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5140 * @hw: pointer to the hardware structure
5141 * @vsi_handle: VSI handle to configure
5142 * @promisc_mask: mask of promiscuous config bits
5143 * @vid: VLAN ID to set VLAN promiscuous
5144 * @lport: logical port number to configure promisc mode
5145 * @sw: pointer to switch info struct for which function add rule
5147 static enum ice_status
5148 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5149 u16 vid, u8 lport, struct ice_switch_info *sw)
/* Local packet-type tags used to pick the dummy DA below */
5151 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5152 struct ice_fltr_list_entry f_list_entry;
5153 struct ice_fltr_info new_fltr;
5154 enum ice_status status = ICE_SUCCESS;
5160 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5162 if (!ice_is_vsi_valid(hw, vsi_handle))
5163 return ICE_ERR_PARAM;
5164 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5166 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* Any VLAN promisc bit switches the whole request to the
 * PROMISC_VLAN recipe and carries the VLAN ID in the lookup data.
 */
5168 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5169 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5170 new_fltr.l_data.mac_vlan.vlan_id = vid;
5171 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5173 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5174 recipe_id = ICE_SW_LKUP_PROMISC;
5177 /* Separate filters must be set for each direction/packet type
5178 * combination, so we will loop over the mask value, store the
5179 * individual type, and clear it out in the input mask as it
/* One switch rule is programmed per iteration; the highest-priority
 * remaining bit is consumed each pass until the mask is empty.
 */
5182 while (promisc_mask) {
5183 struct ice_sw_recipe *recp_list;
/* Pick off one UCAST/MCAST/BCAST Rx or Tx bit. NOTE(review): the
 * lines setting the Tx-direction flag for the *_TX cases appear to
 * be elided in this view.
 */
5189 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5190 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5191 pkt_type = UCAST_FLTR;
5192 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5193 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5194 pkt_type = UCAST_FLTR;
5196 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5197 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5198 pkt_type = MCAST_FLTR;
5199 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5200 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5201 pkt_type = MCAST_FLTR;
5203 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5204 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5205 pkt_type = BCAST_FLTR;
5206 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5207 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5208 pkt_type = BCAST_FLTR;
5212 /* Check for VLAN promiscuous flag */
5213 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5214 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5215 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5216 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5220 /* Set filter DA based on packet type */
5221 mac_addr = new_fltr.l_data.mac.mac_addr;
5222 if (pkt_type == BCAST_FLTR) {
/* Broadcast: all-ones DA */
5223 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5224 } else if (pkt_type == MCAST_FLTR ||
5225 pkt_type == UCAST_FLTR) {
5226 /* Use the dummy ether header DA */
5227 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5228 ICE_NONDMA_TO_NONDMA);
5229 if (pkt_type == MCAST_FLTR)
5230 mac_addr[0] |= 0x1; /* Set multicast bit */
5233 /* Need to reset this to zero for all iterations */
/* Tx rules source from the VSI itself; Rx rules source from the
 * logical port. NOTE(review): the is_tx_fltr condition selecting
 * between these branches is elided in this view.
 */
5236 new_fltr.flag |= ICE_FLTR_TX;
5237 new_fltr.src = hw_vsi_id;
5239 new_fltr.flag |= ICE_FLTR_RX;
5240 new_fltr.src = lport;
5243 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5244 new_fltr.vsi_handle = vsi_handle;
5245 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5246 f_list_entry.fltr_info = new_fltr;
5247 recp_list = &sw->recp_list[recipe_id];
5249 status = ice_add_rule_internal(hw, recp_list, lport,
5251 if (status != ICE_SUCCESS)
5252 goto set_promisc_exit;
5260 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5261 * @hw: pointer to the hardware structure
5262 * @vsi_handle: VSI handle to configure
5263 * @promisc_mask: mask of promiscuous config bits
5264 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper over _ice_set_vsi_promisc() using the adapter's own
 * logical port and hw->switch_info.
 */
5267 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5270 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5271 hw->port_info->lport,
5276 * _ice_set_vlan_vsi_promisc
5277 * @hw: pointer to the hardware structure
5278 * @vsi_handle: VSI handle to configure
5279 * @promisc_mask: mask of promiscuous config bits
5280 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5281 * @lport: logical port number to configure promisc mode
5282 * @sw: pointer to switch info struct for which function add rule
5284 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5286 static enum ice_status
5287 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5288 bool rm_vlan_promisc, u8 lport,
5289 struct ice_switch_info *sw)
5291 struct ice_fltr_list_entry *list_itr, *tmp;
5292 struct LIST_HEAD_TYPE vsi_list_head;
5293 struct LIST_HEAD_TYPE *vlan_head;
5294 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5295 enum ice_status status;
/* Snapshot (copy) all VLAN filters this VSI uses, under the VLAN
 * recipe lock, so promisc can be toggled per VLAN without holding it.
 */
5298 INIT_LIST_HEAD(&vsi_list_head);
5299 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5300 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5301 ice_acquire_lock(vlan_lock);
5302 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5304 ice_release_lock(vlan_lock);
5306 goto free_fltr_list;
/* Apply (or clear) the promisc mask for each VLAN found */
5308 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5310 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5311 if (rm_vlan_promisc)
5312 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5316 status = _ice_set_vsi_promisc(hw, vsi_handle,
5317 promisc_mask, vlan_id,
/* Free the snapshot copies on all paths */
5324 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5325 ice_fltr_list_entry, list_entry) {
5326 LIST_DEL(&list_itr->list_entry);
5327 ice_free(hw, list_itr);
5333 * ice_set_vlan_vsi_promisc
5334 * @hw: pointer to the hardware structure
5335 * @vsi_handle: VSI handle to configure
5336 * @promisc_mask: mask of promiscuous config bits
5337 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5339 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper over _ice_set_vlan_vsi_promisc() using the adapter's
 * own logical port and hw->switch_info.
 */
5342 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5343 bool rm_vlan_promisc)
5345 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5346 rm_vlan_promisc, hw->port_info->lport,
5351 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5352 * @hw: pointer to the hardware structure
5353 * @vsi_handle: VSI handle to remove filters from
5354 * @recp_list: recipe list from which function remove fltr
5355 * @lkup: switch rule filter lookup type
5358 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5359 struct ice_sw_recipe *recp_list,
5360 enum ice_sw_lkup_type lkup)
5362 struct ice_fltr_list_entry *fm_entry;
5363 struct LIST_HEAD_TYPE remove_list_head;
5364 struct LIST_HEAD_TYPE *rule_head;
5365 struct ice_fltr_list_entry *tmp;
5366 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5367 enum ice_status status;
/* Phase 1: collect copies of this VSI's rules for the given lookup
 * type, under the recipe's rule-list lock.
 */
5369 INIT_LIST_HEAD(&remove_list_head);
5370 rule_lock = &recp_list[lkup].filt_rule_lock;
5371 rule_head = &recp_list[lkup].filt_rules;
5372 ice_acquire_lock(rule_lock);
5373 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5375 ice_release_lock(rule_lock);
/* Phase 2: dispatch to the type-specific removal helper.
 * NOTE(review): removal status from these calls is not propagated;
 * the switch appears to be best-effort by design.
 */
5380 case ICE_SW_LKUP_MAC:
5381 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5383 case ICE_SW_LKUP_VLAN:
5384 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5386 case ICE_SW_LKUP_PROMISC:
5387 case ICE_SW_LKUP_PROMISC_VLAN:
5388 ice_remove_promisc(hw, lkup, &remove_list_head);
5390 case ICE_SW_LKUP_MAC_VLAN:
5391 ice_remove_mac_vlan(hw, &remove_list_head);
5393 case ICE_SW_LKUP_ETHERTYPE:
5394 case ICE_SW_LKUP_ETHERTYPE_MAC:
5395 ice_remove_eth_mac(hw, &remove_list_head);
5397 case ICE_SW_LKUP_DFLT:
5398 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5400 case ICE_SW_LKUP_LAST:
5401 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Phase 3: free the phase-1 copies */
5405 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5406 ice_fltr_list_entry, list_entry) {
5407 LIST_DEL(&fm_entry->list_entry);
5408 ice_free(hw, fm_entry);
5413 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5414 * @hw: pointer to the hardware structure
5415 * @vsi_handle: VSI handle to remove filters from
5416 * @sw: pointer to switch info struct
/* Sweep every lookup type for the VSI, one ice_remove_vsi_lkup_fltr()
 * call per recipe; each call is independent and best-effort.
 */
5419 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5420 struct ice_switch_info *sw)
5422 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5424 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5425 sw->recp_list, ICE_SW_LKUP_MAC);
5426 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5427 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5428 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5429 sw->recp_list, ICE_SW_LKUP_PROMISC);
5430 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5431 sw->recp_list, ICE_SW_LKUP_VLAN);
5432 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5433 sw->recp_list, ICE_SW_LKUP_DFLT);
5434 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5435 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5436 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5437 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5438 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5439 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5443 * ice_remove_vsi_fltr - Remove all filters for a VSI
5444 * @hw: pointer to the hardware structure
5445 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper over ice_remove_vsi_fltr_rule() using hw->switch_info */
5447 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5449 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5453 * ice_alloc_res_cntr - allocating resource counter
5454 * @hw: pointer to the hardware structure
5455 * @type: type of resource
5456 * @alloc_shared: if set it is shared else dedicated
5457 * @num_items: number of entries requested for FD resource type
5458 * @counter_id: counter index returned by AQ call
5461 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5464 struct ice_aqc_alloc_free_res_elem *buf;
5465 enum ice_status status;
5468 /* Allocate resource */
5469 buf_len = ice_struct_size(buf, elem, 1);
5470 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* NOTE(review): the NULL check guarding this return is elided here */
5472 return ICE_ERR_NO_MEMORY;
/* Build the alloc-resource AQ request: count, then the resource type
 * field with the shared/dedicated flag OR'd into the low bits.
 */
5474 buf->num_elems = CPU_TO_LE16(num_items);
5475 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5476 ICE_AQC_RES_TYPE_M) | alloc_shared);
5478 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5479 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated index in elem[0].e.sw_resp */
5483 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5491 * ice_free_res_cntr - free resource counter
5492 * @hw: pointer to the hardware structure
5493 * @type: type of resource
5494 * @alloc_shared: if set it is shared else dedicated
5495 * @num_items: number of entries to be freed for FD resource type
5496 * @counter_id: counter ID resource which needs to be freed
5499 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5502 struct ice_aqc_alloc_free_res_elem *buf;
5503 enum ice_status status;
5507 buf_len = ice_struct_size(buf, elem, 1);
5508 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* NOTE(review): the NULL check guarding this return is elided here */
5510 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr(): same request layout, but the
 * counter to release goes in sw_resp and the opcode is free_res.
 */
5512 buf->num_elems = CPU_TO_LE16(num_items);
5513 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5514 ICE_AQC_RES_TYPE_M) | alloc_shared);
5515 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5517 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5518 ice_aqc_opc_free_res, NULL);
/* Failure is logged but otherwise reported via the returned status */
5520 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5527 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5528 * @hw: pointer to the hardware structure
5529 * @counter_id: returns counter index
/* Allocate one dedicated VLAN counter via ice_alloc_res_cntr() */
5531 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5533 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5534 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5539 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5540 * @hw: pointer to the hardware structure
5541 * @counter_id: counter index to be freed
/* Free one dedicated VLAN counter via ice_free_res_cntr() */
5543 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5545 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5546 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5551 * ice_alloc_res_lg_act - add large action resource
5552 * @hw: pointer to the hardware structure
5553 * @l_id: large action ID to fill it in
5554 * @num_acts: number of actions to hold with a large action entry
5556 static enum ice_status
5557 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5559 struct ice_aqc_alloc_free_res_elem *sw_buf;
5560 enum ice_status status;
/* ICE_MAX_LG_ACT bounds how many actions one large-action entry holds */
5563 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5564 return ICE_ERR_PARAM;
5566 /* Allocate resource for large action */
5567 buf_len = ice_struct_size(sw_buf, elem, 1);
5568 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
/* NOTE(review): the NULL check guarding this return is elided here */
5570 return ICE_ERR_NO_MEMORY;
5572 sw_buf->num_elems = CPU_TO_LE16(1);
5574 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5575 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5576 * If num_acts is greater than 2, then use
5577 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5578 * The num_acts cannot exceed 4. This was ensured at the
5579 * beginning of the function.
/* NOTE(review): the "num_acts == 1" condition for this first branch is
 * elided in this view.
 */
5582 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5583 else if (num_acts == 2)
5584 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5586 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5588 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5589 ice_aqc_opc_alloc_res, NULL);
/* FW returns the large-action table index in elem[0].e.sw_resp */
5591 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5593 ice_free(hw, sw_buf);
5598 * ice_add_mac_with_sw_marker - add filter with sw marker
5599 * @hw: pointer to the hardware structure
5600 * @f_info: filter info structure containing the MAC filter information
5601 * @sw_marker: sw marker to tag the Rx descriptor with
5604 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5607 struct ice_fltr_mgmt_list_entry *m_entry;
5608 struct ice_fltr_list_entry fl_info;
5609 struct ice_sw_recipe *recp_list;
5610 struct LIST_HEAD_TYPE l_head;
5611 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5612 enum ice_status ret;
/* Only forward-to-VSI MAC lookups may carry a sw marker, and the
 * marker id itself must be valid.
 */
5616 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5617 return ICE_ERR_PARAM;
5619 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5620 return ICE_ERR_PARAM;
5622 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5623 return ICE_ERR_PARAM;
5625 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5626 return ICE_ERR_PARAM;
5627 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle)
5629 /* Add filter if it doesn't exist so then the adding of large
5630 * action always results in update
/* Stack-allocated single-entry list to drive ice_add_mac_rule() */
5633 INIT_LIST_HEAD(&l_head);
5634 fl_info.fltr_info = *f_info;
5635 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the rule pre-existed so we only undo our own add
 * on the error path at the bottom.
 */
5637 entry_exists = false;
5638 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5639 hw->port_info->lport);
5640 if (ret == ICE_ERR_ALREADY_EXISTS)
5641 entry_exists = true;
5645 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5646 rule_lock = &recp_list->filt_rule_lock;
5647 ice_acquire_lock(rule_lock);
5648 /* Get the book keeping entry for the filter */
5649 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5653 /* If counter action was enabled for this rule then don't enable
5654 * sw marker large action
/* Counter and sw-marker large actions are mutually exclusive */
5656 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5657 ret = ICE_ERR_PARAM;
5661 /* if same marker was added before */
5662 if (m_entry->sw_marker_id == sw_marker) {
5663 ret = ICE_ERR_ALREADY_EXISTS;
5667 /* Allocate a hardware table entry to hold large act. Three actions
5668 * for marker based large action
5670 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5674 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5677 /* Update the switch rule to add the marker action */
5678 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
/* NOTE(review): this early unlock appears to belong to the success
 * path; the second release below is the shared exit path.
 */
5680 ice_release_lock(rule_lock);
5685 ice_release_lock(rule_lock);
5686 /* only remove entry if it did not exist previously */
5688 ret = ice_remove_mac(hw, &l_head);
5694 * ice_add_mac_with_counter - add filter with counter enabled
5695 * @hw: pointer to the hardware structure
5696 * @f_info: pointer to filter info structure containing the MAC filter
/* Attach a counter-based large action to a MAC forwarding filter.
 *
 * Flow (as visible here): validate the filter info, add the MAC rule (or
 * detect that it already exists), look up its book-keeping entry under the
 * recipe's rule lock, allocate a VLAN counter resource plus a 2-slot large
 * action, and rewrite the switch rule to reference the counter action.
 *
 * NOTE(review): this is an elided excerpt -- error-handling lines (goto
 * targets, labels, closing braces, and some declarations such as
 * 'entry_exist', 'counter_id' and 'lg_act_id') fall in the gaps between the
 * numbered lines and are not visible.
 */
5700 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5702 struct ice_fltr_mgmt_list_entry *m_entry;
5703 struct ice_fltr_list_entry fl_info;
5704 struct ice_sw_recipe *recp_list;
5705 struct LIST_HEAD_TYPE l_head;
5706 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5707 enum ice_status ret;
/* Counter large-actions are only supported on forward-to-VSI MAC filters */
5712 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5713 return ICE_ERR_PARAM;
5715 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5716 return ICE_ERR_PARAM;
5718 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5719 return ICE_ERR_PARAM;
/* Translate the software VSI handle into the hardware VSI number */
5720 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5721 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5723 entry_exist = false;
5725 rule_lock = &recp_list->filt_rule_lock;
5727 /* Add filter if it doesn't exist so then the adding of large
5728 * action always results in update
5730 INIT_LIST_HEAD(&l_head);
5732 fl_info.fltr_info = *f_info;
5733 LIST_ADD(&fl_info.list_entry, &l_head);
5735 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5736 hw->port_info->lport);
/* ICE_ERR_ALREADY_EXISTS is not fatal; it marks that the rule pre-existed
 * (presumably sets 'entry_exist' in an elided line) so it is not removed
 * on a later failure.
 */
5737 if (ret == ICE_ERR_ALREADY_EXISTS)
5742 ice_acquire_lock(rule_lock);
5743 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5745 ret = ICE_ERR_BAD_PTR;
5749 /* Don't enable counter for a filter for which sw marker was enabled */
5750 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5751 ret = ICE_ERR_PARAM;
5755 /* If a counter was already enabled then don't need to add again */
5756 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5757 ret = ICE_ERR_ALREADY_EXISTS;
5761 /* Allocate a hardware table entry to VLAN counter */
5762 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5766 /* Allocate a hardware table entry to hold large act. Two actions for
5767 * counter based large action
5769 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5773 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5776 /* Update the switch rule to add the counter action */
5777 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5779 ice_release_lock(rule_lock);
/* Failure path: drop the lock, and undo the rule add only when this call
 * created it (see the pre-existence check above).
 */
5784 ice_release_lock(rule_lock);
5785 /* only remove entry if it did not exist previously */
5787 ret = ice_remove_mac(hw, &l_head);
5792 /* This is mapping table entry that maps every word within a given protocol
5793 * structure to the real byte offset as per the specification of that
5795 * for example dst address is 3 words in ethertype header and corresponding
5796 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5797 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5798 * matching entry describing its field. This needs to be updated if new
5799 * structure is added to that union.
/* Per-protocol word-offset extraction table: for each protocol type, the
 * byte offset of every 16-bit word that can be matched within that header.
 * Tunnel header entries (VXLAN/GENEVE/GTP/...) start at offset 8, i.e. past
 * the UDP header that precedes them.  Indexed by enum ice_protocol_type.
 * NOTE(review): the closing "};" of this initializer is elided from view.
 */
5801 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5802 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5803 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5804 { ICE_ETYPE_OL, { 0 } },
5805 { ICE_VLAN_OFOS, { 0, 2 } },
5806 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5807 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5808 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5809 26, 28, 30, 32, 34, 36, 38 } },
5810 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5811 26, 28, 30, 32, 34, 36, 38 } },
5812 { ICE_TCP_IL, { 0, 2 } },
5813 { ICE_UDP_OF, { 0, 2 } },
5814 { ICE_UDP_ILOS, { 0, 2 } },
5815 { ICE_SCTP_IL, { 0, 2 } },
5816 { ICE_VXLAN, { 8, 10, 12, 14 } },
5817 { ICE_GENEVE, { 8, 10, 12, 14 } },
5818 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5819 { ICE_NVGRE, { 0, 2, 4, 6 } },
5820 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5821 { ICE_PPPOE, { 0, 2, 4, 6 } },
5822 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5823 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5824 { ICE_ESP, { 0, 2, 4, 6 } },
5825 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5826 { ICE_NAT_T, { 8, 10, 12, 14 } },
5827 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
5830 /* The following table describes preferred grouping of recipes.
5831 * If a recipe that needs to be programmed is a superset or matches one of the
5832 * following combinations, then the recipe needs to be chained as per the
/* Map from software protocol type to the hardware protocol ID used in field
 * vectors.  UDP-encapsulated tunnels (VXLAN/GENEVE/GTP/PFCP/NAT-T/...) reuse
 * the corresponding UDP hardware IDs; NVGRE uses the GRE hardware ID.
 * NOTE(review): the closing "};" of this initializer is elided from view.
 */
5836 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5837 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5838 { ICE_MAC_IL, ICE_MAC_IL_HW },
5839 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5840 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5841 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5842 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5843 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5844 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5845 { ICE_TCP_IL, ICE_TCP_IL_HW },
5846 { ICE_UDP_OF, ICE_UDP_OF_HW },
5847 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5848 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5849 { ICE_VXLAN, ICE_UDP_OF_HW },
5850 { ICE_GENEVE, ICE_UDP_OF_HW },
5851 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5852 { ICE_NVGRE, ICE_GRE_OF_HW },
5853 { ICE_GTP, ICE_UDP_OF_HW },
5854 { ICE_PPPOE, ICE_PPPOE_HW },
5855 { ICE_PFCP, ICE_UDP_ILOS_HW },
5856 { ICE_L2TPV3, ICE_L2TPV3_HW },
5857 { ICE_ESP, ICE_ESP_HW },
5858 { ICE_AH, ICE_AH_HW },
5859 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5860 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
5864 * ice_find_recp - find a recipe
5865 * @hw: pointer to the hardware structure
5866 * @lkup_exts: extension sequence to match
5868 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* Search the software recipe list for a recipe whose extraction words and
 * tunnel type match 'lkup_exts'.  A match requires the same word count, and
 * for every lookup word an equal (prot_id, off) pair -- order-independent --
 * in the candidate recipe (field masks 'cr'/'de' are presumably compared in
 * an elided line; confirm against the full source).
 * NOTE(review): elided excerpt -- loop bodies, 'found' updates, and several
 * declarations ('i', 'pe', 'qr', 'found') are missing between numbered lines.
 */
5870 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5871 enum ice_sw_tunnel_type tun_type)
5873 bool refresh_required = true;
5874 struct ice_sw_recipe *recp;
5877 /* Walk through existing recipes to find a match */
5878 recp = hw->switch_info->recp_list;
5879 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5880 /* If recipe was not created for this ID, in SW bookkeeping,
5881 * check if FW has an entry for this recipe. If the FW has an
5882 * entry update it in our SW bookkeeping and continue with the
5885 if (!recp[i].recp_created)
5886 if (ice_get_recp_frm_fw(hw,
5887 hw->switch_info->recp_list, i,
5891 /* Skip inverse action recipes */
5892 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5893 ICE_AQ_RECIPE_ACT_INV_ACT)
5896 /* if number of words we are looking for match */
5897 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5898 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5899 struct ice_fv_word *be = lkup_exts->fv_words;
5900 u16 *cr = recp[i].lkup_exts.field_mask;
5901 u16 *de = lkup_exts->field_mask;
5905 /* ar, cr, and qr are related to the recipe words, while
5906 * be, de, and pe are related to the lookup words
5908 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5909 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5911 if (ar[qr].off == be[pe].off &&
5912 ar[qr].prot_id == be[pe].prot_id &&
5914 /* Found the "pe"th word in the
5919 /* After walking through all the words in the
5920 * "i"th recipe if "p"th word was not found then
5921 * this recipe is not what we are looking for.
5922 * So break out from this loop and try the next
5925 if (qr >= recp[i].lkup_exts.n_val_words) {
5930 /* If for "i"th recipe the found was never set to false
5931 * then it means we found our match
5933 if (tun_type == recp[i].tun_type && found)
5934 return i; /* Return the recipe ID */
/* No recipe matched: the sentinel tells the caller to create a new one */
5937 return ICE_MAX_NUM_RECIPES;
5941 * ice_prot_type_to_id - get protocol ID from protocol type
5942 * @type: protocol type
5943 * @id: pointer to variable that will receive the ID
5945 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl to translate a software protocol type into
 * its hardware protocol ID; writes *id on a hit.
 * NOTE(review): elided excerpt -- the 'return true' / 'return false' lines
 * and the loop-variable declaration fall in the numbering gaps.
 */
5947 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5951 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5952 if (ice_prot_id_tbl[i].type == type) {
5953 *id = ice_prot_id_tbl[i].protocol_id;
5960 * ice_fill_valid_words - count valid words
5961 * @rule: advanced rule with lookup information
5962 * @lkup_exts: byte offset extractions of the words that are valid
5964 * calculate valid words in a lookup rule using mask value
/* Append to 'lkup_exts' one (prot_id, offset, mask) entry per non-zero
 * 16-bit word in the rule's match mask, translating word index to a packet
 * byte offset via ice_prot_ext.  Returns the number of words added (0 also
 * when the protocol type is unknown or ICE_MAX_CHAIN_WORDS is reached).
 * NOTE(review): elided excerpt -- the early 'return 0', 'word++', and the
 * final 'return ret_val' lines are in the numbering gaps.
 */
5967 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5968 struct ice_prot_lkup_ext *lkup_exts)
5970 u8 j, word, prot_id, ret_val;
5972 if (!ice_prot_type_to_id(rule->type, &prot_id))
/* Start appending after the words already recorded by earlier rules */
5975 word = lkup_exts->n_val_words;
5977 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5978 if (((u16 *)&rule->m_u)[j] &&
5979 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5980 /* No more space to accommodate */
5981 if (word >= ICE_MAX_CHAIN_WORDS)
5983 lkup_exts->fv_words[word].off =
5984 ice_prot_ext[rule->type].offs[j];
5985 lkup_exts->fv_words[word].prot_id =
5986 ice_prot_id_tbl[rule->type].protocol_id;
/* Mask is carried in network byte order in m_u; store it host-order */
5987 lkup_exts->field_mask[word] =
5988 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
5992 ret_val = word - lkup_exts->n_val_words;
5993 lkup_exts->n_val_words = word;
5999 * ice_create_first_fit_recp_def - Create a recipe grouping
6000 * @hw: pointer to the hardware structure
6001 * @lkup_exts: an array of protocol header extractions
6002 * @rg_list: pointer to a list that stores new recipe groups
6003 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6005 * Using first fit algorithm, take all the words that are still not done
6006 * and start grouping them in 4-word groups. Each group makes up one
/* First-fit grouping: pack every not-yet-done lookup word into recipe groups
 * of at most ICE_NUM_WORDS_RECIPE pairs, allocating a new group entry on
 * rg_list whenever the current one is full (or none exists).  An empty
 * lkup_exts still gets one (empty) group.
 * NOTE(review): elided excerpt -- NULL checks after ice_malloc, the
 * 'grp->n_val_pairs++' increment, '*recp_cnt' updates, and the final return
 * fall in the numbering gaps; leaked list entries on the ICE_ERR_NO_MEMORY
 * paths, if any, cannot be judged from this view.
 */
6009 static enum ice_status
6010 ice_create_first_fit_recp_def(struct ice_hw *hw,
6011 struct ice_prot_lkup_ext *lkup_exts,
6012 struct LIST_HEAD_TYPE *rg_list,
6015 struct ice_pref_recipe_group *grp = NULL;
6020 if (!lkup_exts->n_val_words) {
6021 struct ice_recp_grp_entry *entry;
6023 entry = (struct ice_recp_grp_entry *)
6024 ice_malloc(hw, sizeof(*entry));
6026 return ICE_ERR_NO_MEMORY;
6027 LIST_ADD(&entry->l_entry, rg_list);
6028 grp = &entry->r_group;
6030 grp->n_val_pairs = 0;
6033 /* Walk through every word in the rule to check if it is not done. If so
6034 * then this word needs to be part of a new recipe.
6036 for (j = 0; j < lkup_exts->n_val_words; j++)
6037 if (!ice_is_bit_set(lkup_exts->done, j)) {
6039 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6040 struct ice_recp_grp_entry *entry;
6042 entry = (struct ice_recp_grp_entry *)
6043 ice_malloc(hw, sizeof(*entry));
6045 return ICE_ERR_NO_MEMORY;
6046 LIST_ADD(&entry->l_entry, rg_list);
6047 grp = &entry->r_group;
/* Copy the word's protocol/offset/mask into the current group slot */
6051 grp->pairs[grp->n_val_pairs].prot_id =
6052 lkup_exts->fv_words[j].prot_id;
6053 grp->pairs[grp->n_val_pairs].off =
6054 lkup_exts->fv_words[j].off;
6055 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6063 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6064 * @hw: pointer to the hardware structure
6065 * @fv_list: field vector with the extraction sequence information
6066 * @rg_list: recipe groupings with protocol-offset pairs
6068 * Helper function to fill in the field vector indices for protocol-offset
6069 * pairs. These indexes are then ultimately programmed into a recipe.
/* For each protocol/offset pair in every recipe group, find the matching
 * word in the first field vector of fv_list and record its index (and mask)
 * in the group entry; fails with ICE_ERR_PARAM when a pair has no matching
 * field-vector word.
 * NOTE(review): elided excerpt -- the 'found' flag handling, the
 * 'rg->fv_idx[i] = ...' store, and loop-variable declarations are in the
 * numbering gaps.
 */
6071 static enum ice_status
6072 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6073 struct LIST_HEAD_TYPE *rg_list)
6075 struct ice_sw_fv_list_entry *fv;
6076 struct ice_recp_grp_entry *rg;
6077 struct ice_fv_word *fv_ext;
/* Nothing to index against -- presumably returns success; confirm */
6079 if (LIST_EMPTY(fv_list))
/* Only the first field vector is consulted for index resolution */
6082 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6083 fv_ext = fv->fv_ptr->ew;
6085 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6088 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6089 struct ice_fv_word *pr;
6094 pr = &rg->r_group.pairs[i];
6095 mask = rg->r_group.mask[i];
6097 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6098 if (fv_ext[j].prot_id == pr->prot_id &&
6099 fv_ext[j].off == pr->off) {
6102 /* Store index of field vector */
6104 rg->fv_mask[i] = mask;
6108 /* Protocol/offset could not be found, caller gave an
6112 return ICE_ERR_PARAM;
6120 * ice_find_free_recp_res_idx - find free result indexes for recipe
6121 * @hw: pointer to hardware structure
6122 * @profiles: bitmap of profiles that will be associated with the new recipe
6123 * @free_idx: pointer to variable to receive the free index bitmap
6125 * The algorithm used here is:
6126 * 1. When creating a new recipe, create a set P which contains all
6127 * Profiles that will be associated with our new recipe
6129 * 2. For each Profile p in set P:
6130 * a. Add all recipes associated with Profile p into set R
6131 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6132 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6133 * i. Or just assume they all have the same possible indexes:
6135 * i.e., PossibleIndexes = 0x0000F00000000000
6137 * 3. For each Recipe r in set R:
6138 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6139 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6141 * FreeIndexes will contain the bits indicating the indexes free for use,
6142 * then the code needs to update the recipe[r].used_result_idx_bits to
6143 * indicate which indexes were selected for use by this recipe.
/* Compute the set of field-vector result indexes still free for a new
 * recipe: start from all-ones, AND in each associated profile's possible
 * indexes, OR together the indexes already used by every potentially
 * colliding recipe, then XOR used vs possible to get the free set.
 * Returns the population count of free_idx.
 * NOTE(review): elided excerpt -- the 'bit' declaration and opening brace
 * are in the numbering gaps.
 */
6146 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6147 ice_bitmap_t *free_idx)
6149 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6150 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6151 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6154 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6155 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6156 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6157 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Optimistic start: every result index is possible until ANDed down */
6159 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6161 /* For each profile we are going to associate the recipe with, add the
6162 * recipes that are associated with that profile. This will give us
6163 * the set of recipes that our recipe may collide with. Also, determine
6164 * what possible result indexes are usable given this set of profiles.
6166 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6167 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6168 ICE_MAX_NUM_RECIPES);
6169 ice_and_bitmap(possible_idx, possible_idx,
6170 hw->switch_info->prof_res_bm[bit],
6174 /* For each recipe that our new recipe may collide with, determine
6175 * which indexes have been used.
6177 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6178 ice_or_bitmap(used_idx, used_idx,
6179 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
6182 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6184 /* return number of free indexes */
6185 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6189 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6190 * @hw: pointer to hardware structure
6191 * @rm: recipe management list entry
6192 * @profiles: bitmap of profiles that will be associated.
/* Program a (possibly chained) switch recipe into firmware.
 *
 * Visible stages: verify enough free result indexes exist for chaining,
 * allocate staging buffers, fetch an existing recipe as a template, fill one
 * AQ recipe element per group (lookup indexes, masks, result index for
 * chaining), add a final "chaining" root recipe when more than one group is
 * needed, submit via ice_aq_add_recipe under the change lock, then mirror
 * everything into the software recp_list book-keeping.
 *
 * NOTE(review): elided excerpt -- many lines (declarations of 'recps',
 * 'rid', 'chain_idx', 'i', 'free_res_idx'; goto labels; 'recps++'
 * increments; error cleanup and the final frees/return) fall in numbering
 * gaps.  Comments below only describe what the visible lines establish.
 */
6194 static enum ice_status
6195 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6196 ice_bitmap_t *profiles)
6198 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6199 struct ice_aqc_recipe_data_elem *tmp;
6200 struct ice_aqc_recipe_data_elem *buf;
6201 struct ice_recp_grp_entry *entry;
6202 enum ice_status status;
6208 /* When more than one recipe are required, another recipe is needed to
6209 * chain them together. Matching a tunnel metadata ID takes up one of
6210 * the match fields in the chaining recipe reducing the number of
6211 * chained recipes by one.
6213 /* check number of free result indices */
6214 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6215 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6217 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6218 free_res_idx, rm->n_grp_count);
6220 if (rm->n_grp_count > 1) {
6221 if (rm->n_grp_count > free_res_idx)
6222 return ICE_ERR_MAX_LIMIT;
6227 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6228 return ICE_ERR_MAX_LIMIT;
/* 'tmp' stages recipes fetched from FW; 'buf' stages the ones we submit */
6230 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6231 ICE_MAX_NUM_RECIPES,
6234 return ICE_ERR_NO_MEMORY;
6236 buf = (struct ice_aqc_recipe_data_elem *)
6237 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6239 status = ICE_ERR_NO_MEMORY;
6243 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6244 recipe_count = ICE_MAX_NUM_RECIPES;
/* Fetch an existing recipe (basic MAC lookup) to use tmp[0] as template */
6245 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6247 if (status || recipe_count == 0)
6250 /* Allocate the recipe resources, and configure them according to the
6251 * match fields from protocol headers and extracted field vectors.
6253 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6254 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6257 status = ice_alloc_recipe(hw, &entry->rid);
6261 /* Clear the result index of the located recipe, as this will be
6262 * updated, if needed, later in the recipe creation process.
6264 tmp[0].content.result_indx = 0;
6266 buf[recps] = tmp[0];
6267 buf[recps].recipe_indx = (u8)entry->rid;
6268 /* if the recipe is a non-root recipe RID should be programmed
6269 * as 0 for the rules to be applied correctly.
6271 buf[recps].content.rid = 0;
6272 ice_memset(&buf[recps].content.lkup_indx, 0,
6273 sizeof(buf[recps].content.lkup_indx),
6276 /* All recipes use look-up index 0 to match switch ID. */
6277 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6278 buf[recps].content.mask[0] =
6279 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6280 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6283 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6284 buf[recps].content.lkup_indx[i] = 0x80;
6285 buf[recps].content.mask[i] = 0;
/* Overwrite slots 1..n with this group's real FV indexes and masks */
6288 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6289 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6290 buf[recps].content.mask[i + 1] =
6291 CPU_TO_LE16(entry->fv_mask[i]);
6294 if (rm->n_grp_count > 1) {
6295 /* Checks to see if there really is a valid result index
6298 if (chain_idx >= ICE_MAX_FV_WORDS) {
6299 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6300 status = ICE_ERR_MAX_LIMIT;
/* Chained case: publish this group's match result at chain_idx so the
 * root recipe can match on it; consume the index from the free bitmap.
 */
6304 entry->chain_idx = chain_idx;
6305 buf[recps].content.result_indx =
6306 ICE_AQ_RECIPE_RESULT_EN |
6307 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6308 ICE_AQ_RECIPE_RESULT_DATA_M);
6309 ice_clear_bit(chain_idx, result_idx_bm);
6310 chain_idx = ice_find_first_bit(result_idx_bm,
6314 /* fill recipe dependencies */
6315 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6316 ICE_MAX_NUM_RECIPES);
6317 ice_set_bit(buf[recps].recipe_indx,
6318 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6319 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6323 if (rm->n_grp_count == 1) {
/* Single-group recipe: it is its own root */
6324 rm->root_rid = buf[0].recipe_indx;
6325 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6326 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6327 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6328 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6329 sizeof(buf[0].recipe_bitmap),
6330 ICE_NONDMA_TO_NONDMA);
6332 status = ICE_ERR_BAD_PTR;
6335 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6336 * the recipe which is getting created if specified
6337 * by user. Usually any advanced switch filter, which results
6338 * into new extraction sequence, ended up creating a new recipe
6339 * of type ROOT and usually recipes are associated with profiles
6340 * Switch rule referring newly created recipe, needs to have
6341 * either/or 'fwd' or 'join' priority, otherwise switch rule
6342 * evaluation will not happen correctly. In other words, if
6343 * switch rule to be evaluated on priority basis, then recipe
6344 * needs to have priority, otherwise it will be evaluated last.
6346 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6348 struct ice_recp_grp_entry *last_chain_entry;
6351 /* Allocate the last recipe that will chain the outcomes of the
6352 * other recipes together
6354 status = ice_alloc_recipe(hw, &rid);
6358 buf[recps].recipe_indx = (u8)rid;
6359 buf[recps].content.rid = (u8)rid;
6360 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6361 /* the new entry created should also be part of rg_list to
6362 * make sure we have complete recipe
6364 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6365 sizeof(*last_chain_entry));
6366 if (!last_chain_entry) {
6367 status = ICE_ERR_NO_MEMORY;
6370 last_chain_entry->rid = rid;
6371 ice_memset(&buf[recps].content.lkup_indx, 0,
6372 sizeof(buf[recps].content.lkup_indx),
6374 /* All recipes use look-up index 0 to match switch ID. */
6375 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6376 buf[recps].content.mask[0] =
6377 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6378 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6379 buf[recps].content.lkup_indx[i] =
6380 ICE_AQ_RECIPE_LKUP_IGNORE;
6381 buf[recps].content.mask[i] = 0;
6385 /* update r_bitmap with the recp that is used for chaining */
6386 ice_set_bit(rid, rm->r_bitmap);
6387 /* this is the recipe that chains all the other recipes so it
6388 * should not have a chaining ID to indicate the same
6390 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Root recipe matches on each sub-recipe's result index with full mask */
6391 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6393 last_chain_entry->fv_idx[i] = entry->chain_idx;
6394 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6395 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6396 ice_set_bit(entry->rid, rm->r_bitmap);
6398 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6399 if (sizeof(buf[recps].recipe_bitmap) >=
6400 sizeof(rm->r_bitmap)) {
6401 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6402 sizeof(buf[recps].recipe_bitmap),
6403 ICE_NONDMA_TO_NONDMA);
6405 status = ICE_ERR_BAD_PTR;
6408 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6411 rm->root_rid = (u8)rid;
/* Submit all staged recipe elements to FW under the change lock */
6413 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6417 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6418 ice_release_change_lock(hw);
6422 /* Every recipe that just got created add it to the recipe
6425 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6426 struct ice_switch_info *sw = hw->switch_info;
6427 bool is_root, idx_found = false;
6428 struct ice_sw_recipe *recp;
6429 u16 idx, buf_idx = 0;
6431 /* find buffer index for copying some data */
6432 for (idx = 0; idx < rm->n_grp_count; idx++)
6433 if (buf[idx].recipe_indx == entry->rid) {
6439 status = ICE_ERR_OUT_OF_RANGE;
6443 recp = &sw->recp_list[entry->rid];
6444 is_root = (rm->root_rid == entry->rid);
6445 recp->is_root = is_root;
6447 recp->root_rid = entry->rid;
6448 recp->big_recp = (is_root && rm->n_grp_count > 1);
6450 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6451 entry->r_group.n_val_pairs *
6452 sizeof(struct ice_fv_word),
6453 ICE_NONDMA_TO_NONDMA);
6455 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6456 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6458 /* Copy non-result fv index values and masks to recipe. This
6459 * call will also update the result recipe bitmask.
6461 ice_collect_result_idx(&buf[buf_idx], recp);
6463 /* for non-root recipes, also copy to the root, this allows
6464 * easier matching of a complete chained recipe
6467 ice_collect_result_idx(&buf[buf_idx],
6468 &sw->recp_list[rm->root_rid]);
6470 recp->n_ext_words = entry->r_group.n_val_pairs;
6471 recp->chain_idx = entry->chain_idx;
6472 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6473 recp->n_grp_count = rm->n_grp_count;
6474 recp->tun_type = rm->tun_type;
6475 recp->recp_created = true;
6489 * ice_create_recipe_group - creates recipe group
6490 * @hw: pointer to hardware structure
6491 * @rm: recipe management list entry
6492 * @lkup_exts: lookup elements
/* Build recipe groups for 'rm' from the lookup extractions: first-fit pack
 * the words into groups, then copy the word list and masks into the recipe
 * management entry.
 * NOTE(review): elided excerpt -- the 'recp_count' declaration, the status
 * check after ice_create_first_fit_recp_def, and the final return are in
 * the numbering gaps.
 */
6494 static enum ice_status
6495 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6496 struct ice_prot_lkup_ext *lkup_exts)
6498 enum ice_status status;
6501 rm->n_grp_count = 0;
6503 /* Create recipes for words that are marked not done by packing them
6506 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6507 &rm->rg_list, &recp_count);
6509 rm->n_grp_count += recp_count;
6510 rm->n_ext_words = lkup_exts->n_val_words;
6511 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6512 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6513 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6514 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6521 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6522 * @hw: pointer to hardware structure
6523 * @lkups: lookup elements or match criteria for the advanced recipe, one
6524 * structure per protocol header
6525 * @lkups_cnt: number of protocols
6526 * @bm: bitmap of field vectors to consider
6527 * @fv_list: pointer to a list that holds the returned field vectors
/* Translate each lookup's protocol type to a hardware protocol ID, then
 * collect the field vectors (restricted by bitmap 'bm') that contain all of
 * those protocols into 'fv_list'.  The temporary prot_ids array is always
 * freed before returning.
 * NOTE(review): elided excerpt -- the zero-lkups early return, NULL check on
 * ice_calloc, the 'free_mem' label and final return are in numbering gaps.
 */
6529 static enum ice_status
6530 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6531 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6533 enum ice_status status;
6540 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6542 return ICE_ERR_NO_MEMORY;
6544 for (i = 0; i < lkups_cnt; i++)
6545 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6546 status = ICE_ERR_CFG;
6550 /* Find field vectors that include all specified protocol types */
6551 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6554 ice_free(hw, prot_ids);
6559 * ice_tun_type_match_word - determine if tun type needs a match mask
6560 * @tun_type: tunnel type
6561 * @mask: mask to be used for the tunnel
/* Decide whether a tunnel type requires matching the metadata tunnel flag,
 * and if so which mask to use: full ICE_TUN_FLAG_MASK for plain tunnels,
 * the mask minus the VLAN bit for the *_VLAN tunnel variants.
 * NOTE(review): elided excerpt -- the 'return true' / default 'return false'
 * lines and closing braces are in the numbering gaps.
 */
6563 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6566 case ICE_SW_TUN_VXLAN_GPE:
6567 case ICE_SW_TUN_GENEVE:
6568 case ICE_SW_TUN_VXLAN:
6569 case ICE_SW_TUN_NVGRE:
6570 case ICE_SW_TUN_UDP:
6571 case ICE_ALL_TUNNELS:
6572 *mask = ICE_TUN_FLAG_MASK;
6575 case ICE_SW_TUN_GENEVE_VLAN:
6576 case ICE_SW_TUN_VXLAN_VLAN:
/* VLAN-tagged tunnel variants must not match on the VLAN flag bit */
6577 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6587 * ice_add_special_words - Add words that are not protocols, such as metadata
6588 * @rinfo: other information regarding the rule e.g. priority and action info
6589 * @lkup_exts: lookup word structure
/* Append non-protocol "metadata" match words to the lookup extractions --
 * currently only the tunnel-flag word, when the rule's tunnel type needs it.
 * Fails with ICE_ERR_MAX_LIMIT when lkup_exts is already full.
 * NOTE(review): elided excerpt -- the 'mask' declaration, opening braces and
 * the final success return are in the numbering gaps.
 */
6591 static enum ice_status
6592 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6593 struct ice_prot_lkup_ext *lkup_exts)
6597 /* If this is a tunneled packet, then add recipe index to match the
6598 * tunnel bit in the packet metadata flags.
6600 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6601 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6602 u8 word = lkup_exts->n_val_words++;
/* Metadata word: hardware metadata protocol ID at the tunnel-flag offset */
6604 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6605 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6606 lkup_exts->field_mask[word] = mask;
6608 return ICE_ERR_MAX_LIMIT;
6615 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6616 * @hw: pointer to hardware structure
6617 * @rinfo: other information regarding the rule e.g. priority and action info
6618 * @bm: pointer to memory for returning the bitmap of field vectors
/* Fill 'bm' with the field-vector (profile) bitmap compatible with the
 * rule's tunnel type.  Two styles of case are visible: broad categories set
 * a prof_type and resolve it via ice_get_sw_fv_bitmap(); specific tunnel
 * types set individual ICE_PROFID_* bits directly.
 * NOTE(review): elided excerpt -- the opening brace, 'break'/'return'
 * statements between cases, the default case, and the closing brace are in
 * the numbering gaps; which cases return early vs. fall through to
 * ice_get_sw_fv_bitmap() cannot be judged from this view.
 */
6621 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6624 enum ice_prof_type prof_type;
6626 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6628 switch (rinfo->tun_type) {
6630 prof_type = ICE_PROF_NON_TUN;
6632 case ICE_ALL_TUNNELS:
6633 prof_type = ICE_PROF_TUN_ALL;
6635 case ICE_SW_TUN_VXLAN_GPE:
6636 case ICE_SW_TUN_GENEVE:
6637 case ICE_SW_TUN_GENEVE_VLAN:
6638 case ICE_SW_TUN_VXLAN:
6639 case ICE_SW_TUN_VXLAN_VLAN:
6640 case ICE_SW_TUN_UDP:
6641 case ICE_SW_TUN_GTP:
6642 prof_type = ICE_PROF_TUN_UDP;
6644 case ICE_SW_TUN_NVGRE:
6645 prof_type = ICE_PROF_TUN_GRE;
6647 case ICE_SW_TUN_PPPOE:
6648 prof_type = ICE_PROF_TUN_PPPOE;
/* From here on: cases that select explicit profile IDs rather than a
 * profile category.
 */
6650 case ICE_SW_TUN_PPPOE_PAY:
6651 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6653 case ICE_SW_TUN_PPPOE_IPV4:
6654 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6655 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6656 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6658 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6659 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6661 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6662 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6664 case ICE_SW_TUN_PPPOE_IPV6:
6665 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6666 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6667 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6669 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6670 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6672 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6673 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6675 case ICE_SW_TUN_PROFID_IPV6_ESP:
6676 case ICE_SW_TUN_IPV6_ESP:
6677 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6679 case ICE_SW_TUN_PROFID_IPV6_AH:
6680 case ICE_SW_TUN_IPV6_AH:
6681 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6683 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6684 case ICE_SW_TUN_IPV6_L2TPV3:
6685 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6687 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6688 case ICE_SW_TUN_IPV6_NAT_T:
6689 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6691 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6692 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6694 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6695 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6697 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6698 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6700 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6701 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6703 case ICE_SW_TUN_IPV4_NAT_T:
6704 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6706 case ICE_SW_TUN_IPV4_L2TPV3:
6707 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6709 case ICE_SW_TUN_IPV4_ESP:
6710 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6712 case ICE_SW_TUN_IPV4_AH:
6713 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6715 case ICE_SW_IPV4_TCP:
6716 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6718 case ICE_SW_IPV4_UDP:
6719 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6721 case ICE_SW_IPV6_TCP:
6722 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6724 case ICE_SW_IPV6_UDP:
6725 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U inner IPv4/IPv6 cases enable both EH and non-EH profile variants
 * for OTHER/UDP/TCP inner payloads.
 */
6727 case ICE_SW_TUN_IPV4_GTPU_IPV4:
6728 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6729 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6730 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6731 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6732 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6733 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6735 case ICE_SW_TUN_IPV6_GTPU_IPV4:
6736 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6737 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6738 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6739 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6740 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6741 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6743 case ICE_SW_TUN_IPV4_GTPU_IPV6:
6744 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6745 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6746 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6747 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6748 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6749 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6751 case ICE_SW_TUN_IPV6_GTPU_IPV6:
6752 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6753 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6754 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6755 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6756 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6757 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6759 case ICE_SW_TUN_AND_NON_TUN:
6761 prof_type = ICE_PROF_ALL;
/* Category-based cases resolve their prof_type into bitmap bits here */
6765 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6769 * ice_is_prof_rule - determine if rule type is a profile rule
6770 * @type: the rule type
6772 * if the rule type is a profile rule, that means that there no field value
6773 * match required, in this case just a profile hit is required.
/* Return whether the tunnel type denotes a "profile rule": a rule matched
 * purely by profile hit, with no field-value lookups required.  All
 * ICE_SW_TUN_PROFID_* types qualify.
 * NOTE(review): elided excerpt -- the switch statement's opening, the
 * 'return true' / 'return false' lines and closing braces are in the
 * numbering gaps.
 */
6775 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6778 case ICE_SW_TUN_PROFID_IPV6_ESP:
6779 case ICE_SW_TUN_PROFID_IPV6_AH:
6780 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6781 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6782 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6783 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6784 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6785 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6795 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6796 * @hw: pointer to hardware structure
6797 * @lkups: lookup elements or match criteria for the advanced recipe, one
6798 * structure per protocol header
6799 * @lkups_cnt: number of protocols
6800 * @rinfo: other information regarding the rule e.g. priority and action info
6801 * @rid: return the recipe ID of the recipe created
6803 static enum ice_status
6804 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6805 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6807 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6808 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6809 struct ice_prot_lkup_ext *lkup_exts;
6810 struct ice_recp_grp_entry *r_entry;
6811 struct ice_sw_fv_list_entry *fvit;
6812 struct ice_recp_grp_entry *r_tmp;
6813 struct ice_sw_fv_list_entry *tmp;
6814 enum ice_status status = ICE_SUCCESS;
6815 struct ice_sw_recipe *rm;
6818 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6819 return ICE_ERR_PARAM;
6821 lkup_exts = (struct ice_prot_lkup_ext *)
6822 ice_malloc(hw, sizeof(*lkup_exts));
6824 return ICE_ERR_NO_MEMORY;
6826 /* Determine the number of words to be matched and if it exceeds a
6827 * recipe's restrictions
6829 for (i = 0; i < lkups_cnt; i++) {
6832 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6833 status = ICE_ERR_CFG;
6834 goto err_free_lkup_exts;
6837 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6839 status = ICE_ERR_CFG;
6840 goto err_free_lkup_exts;
6844 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6846 status = ICE_ERR_NO_MEMORY;
6847 goto err_free_lkup_exts;
6850 /* Get field vectors that contain fields extracted from all the protocol
6851 * headers being programmed.
6853 INIT_LIST_HEAD(&rm->fv_list);
6854 INIT_LIST_HEAD(&rm->rg_list);
6856 /* Get bitmap of field vectors (profiles) that are compatible with the
6857 * rule request; only these will be searched in the subsequent call to
6860 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6862 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6866 /* Create any special protocol/offset pairs, such as looking at tunnel
6867 * bits by extracting metadata
6869 status = ice_add_special_words(rinfo, lkup_exts);
6871 goto err_free_lkup_exts;
6873 /* Group match words into recipes using preferred recipe grouping
6876 status = ice_create_recipe_group(hw, rm, lkup_exts);
6880 /* set the recipe priority if specified */
6881 rm->priority = (u8)rinfo->priority;
6883 /* Find offsets from the field vector. Pick the first one for all the
6886 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6890 /* An empty FV list means to use all the profiles returned in the
6893 if (LIST_EMPTY(&rm->fv_list)) {
6896 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
6897 struct ice_sw_fv_list_entry *fvl;
6899 fvl = (struct ice_sw_fv_list_entry *)
6900 ice_malloc(hw, sizeof(*fvl));
6904 fvl->profile_id = j;
6905 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6909 /* get bitmap of all profiles the recipe will be associated with */
6910 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6911 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6913 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6914 ice_set_bit((u16)fvit->profile_id, profiles);
6917 /* Look for a recipe which matches our requested fv / mask list */
6918 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6919 if (*rid < ICE_MAX_NUM_RECIPES)
6920 /* Success if found a recipe that match the existing criteria */
6923 rm->tun_type = rinfo->tun_type;
6924 /* Recipe we need does not exist, add a recipe */
6925 status = ice_add_sw_recipe(hw, rm, profiles);
6929 /* Associate all the recipes created with all the profiles in the
6930 * common field vector.
6932 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6934 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6937 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6938 (u8 *)r_bitmap, NULL);
6942 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6943 ICE_MAX_NUM_RECIPES);
6944 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6948 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6951 ice_release_change_lock(hw);
6956 /* Update profile to recipe bitmap array */
6957 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6958 ICE_MAX_NUM_RECIPES);
6960 /* Update recipe to profile bitmap array */
6961 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
6962 ice_set_bit((u16)fvit->profile_id,
6963 recipe_to_profile[j]);
6966 *rid = rm->root_rid;
6967 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6968 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6970 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6971 ice_recp_grp_entry, l_entry) {
6972 LIST_DEL(&r_entry->l_entry);
6973 ice_free(hw, r_entry);
6976 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6978 LIST_DEL(&fvit->list_entry);
6983 ice_free(hw, rm->root_buf);
6988 ice_free(hw, lkup_exts);
6994 * ice_find_dummy_packet - find dummy packet by tunnel type
6996 * @lkups: lookup elements or match criteria for the advanced recipe, one
6997 * structure per protocol header
6998 * @lkups_cnt: number of protocols
6999 * @tun_type: tunnel type from the match criteria
7000 * @pkt: dummy packet to fill according to filter match criteria
7001 * @pkt_len: packet length of dummy packet
7002 * @offsets: pointer to receive the pointer to the offsets for the packet
7005 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7006 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7008 const struct ice_dummy_pkt_offsets **offsets)
7010 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7014 for (i = 0; i < lkups_cnt; i++) {
7015 if (lkups[i].type == ICE_UDP_ILOS)
7017 else if (lkups[i].type == ICE_TCP_IL)
7019 else if (lkups[i].type == ICE_IPV6_OFOS)
7021 else if (lkups[i].type == ICE_VLAN_OFOS)
7023 else if (lkups[i].type == ICE_IPV4_OFOS &&
7024 lkups[i].h_u.ipv4_hdr.protocol ==
7025 ICE_IPV4_NVGRE_PROTO_ID &&
7026 lkups[i].m_u.ipv4_hdr.protocol ==
7029 else if (lkups[i].type == ICE_PPPOE &&
7030 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7031 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7032 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7035 else if (lkups[i].type == ICE_ETYPE_OL &&
7036 lkups[i].h_u.ethertype.ethtype_id ==
7037 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7038 lkups[i].m_u.ethertype.ethtype_id ==
7041 else if (lkups[i].type == ICE_IPV4_IL &&
7042 lkups[i].h_u.ipv4_hdr.protocol ==
7044 lkups[i].m_u.ipv4_hdr.protocol ==
7049 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7050 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7051 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7052 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7054 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7055 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7056 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7057 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7059 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7060 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7061 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7062 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7064 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7065 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7066 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7067 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7069 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7070 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7071 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7072 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7074 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7075 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7076 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7077 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7081 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7082 *pkt = dummy_ipv4_esp_pkt;
7083 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7084 *offsets = dummy_ipv4_esp_packet_offsets;
7088 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7089 *pkt = dummy_ipv6_esp_pkt;
7090 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7091 *offsets = dummy_ipv6_esp_packet_offsets;
7095 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7096 *pkt = dummy_ipv4_ah_pkt;
7097 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7098 *offsets = dummy_ipv4_ah_packet_offsets;
7102 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7103 *pkt = dummy_ipv6_ah_pkt;
7104 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7105 *offsets = dummy_ipv6_ah_packet_offsets;
7109 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7110 *pkt = dummy_ipv4_nat_pkt;
7111 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7112 *offsets = dummy_ipv4_nat_packet_offsets;
7116 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7117 *pkt = dummy_ipv6_nat_pkt;
7118 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7119 *offsets = dummy_ipv6_nat_packet_offsets;
7123 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7124 *pkt = dummy_ipv4_l2tpv3_pkt;
7125 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7126 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7130 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7131 *pkt = dummy_ipv6_l2tpv3_pkt;
7132 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7133 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7137 if (tun_type == ICE_SW_TUN_GTP) {
7138 *pkt = dummy_udp_gtp_packet;
7139 *pkt_len = sizeof(dummy_udp_gtp_packet);
7140 *offsets = dummy_udp_gtp_packet_offsets;
7144 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7145 *pkt = dummy_pppoe_ipv6_packet;
7146 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7147 *offsets = dummy_pppoe_packet_offsets;
7149 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7150 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7151 *pkt = dummy_pppoe_ipv4_packet;
7152 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7153 *offsets = dummy_pppoe_packet_offsets;
7157 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7158 *pkt = dummy_pppoe_ipv4_packet;
7159 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7160 *offsets = dummy_pppoe_packet_ipv4_offsets;
7164 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7165 *pkt = dummy_pppoe_ipv4_tcp_packet;
7166 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7167 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7171 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7172 *pkt = dummy_pppoe_ipv4_udp_packet;
7173 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7174 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7178 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7179 *pkt = dummy_pppoe_ipv6_packet;
7180 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7181 *offsets = dummy_pppoe_packet_ipv6_offsets;
7185 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7186 *pkt = dummy_pppoe_ipv6_tcp_packet;
7187 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7188 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7192 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7193 *pkt = dummy_pppoe_ipv6_udp_packet;
7194 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7195 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7199 if (tun_type == ICE_SW_IPV4_TCP) {
7200 *pkt = dummy_tcp_packet;
7201 *pkt_len = sizeof(dummy_tcp_packet);
7202 *offsets = dummy_tcp_packet_offsets;
7206 if (tun_type == ICE_SW_IPV4_UDP) {
7207 *pkt = dummy_udp_packet;
7208 *pkt_len = sizeof(dummy_udp_packet);
7209 *offsets = dummy_udp_packet_offsets;
7213 if (tun_type == ICE_SW_IPV6_TCP) {
7214 *pkt = dummy_tcp_ipv6_packet;
7215 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7216 *offsets = dummy_tcp_ipv6_packet_offsets;
7220 if (tun_type == ICE_SW_IPV6_UDP) {
7221 *pkt = dummy_udp_ipv6_packet;
7222 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7223 *offsets = dummy_udp_ipv6_packet_offsets;
7227 if (tun_type == ICE_ALL_TUNNELS) {
7228 *pkt = dummy_gre_udp_packet;
7229 *pkt_len = sizeof(dummy_gre_udp_packet);
7230 *offsets = dummy_gre_udp_packet_offsets;
7234 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7236 *pkt = dummy_gre_tcp_packet;
7237 *pkt_len = sizeof(dummy_gre_tcp_packet);
7238 *offsets = dummy_gre_tcp_packet_offsets;
7242 *pkt = dummy_gre_udp_packet;
7243 *pkt_len = sizeof(dummy_gre_udp_packet);
7244 *offsets = dummy_gre_udp_packet_offsets;
7248 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7249 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7250 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7251 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7253 *pkt = dummy_udp_tun_tcp_packet;
7254 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7255 *offsets = dummy_udp_tun_tcp_packet_offsets;
7259 *pkt = dummy_udp_tun_udp_packet;
7260 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7261 *offsets = dummy_udp_tun_udp_packet_offsets;
7267 *pkt = dummy_vlan_udp_packet;
7268 *pkt_len = sizeof(dummy_vlan_udp_packet);
7269 *offsets = dummy_vlan_udp_packet_offsets;
7272 *pkt = dummy_udp_packet;
7273 *pkt_len = sizeof(dummy_udp_packet);
7274 *offsets = dummy_udp_packet_offsets;
7276 } else if (udp && ipv6) {
7278 *pkt = dummy_vlan_udp_ipv6_packet;
7279 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7280 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7283 *pkt = dummy_udp_ipv6_packet;
7284 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7285 *offsets = dummy_udp_ipv6_packet_offsets;
7287 } else if ((tcp && ipv6) || ipv6) {
7289 *pkt = dummy_vlan_tcp_ipv6_packet;
7290 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7291 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7294 *pkt = dummy_tcp_ipv6_packet;
7295 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7296 *offsets = dummy_tcp_ipv6_packet_offsets;
7301 *pkt = dummy_vlan_tcp_packet;
7302 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7303 *offsets = dummy_vlan_tcp_packet_offsets;
7305 *pkt = dummy_tcp_packet;
7306 *pkt_len = sizeof(dummy_tcp_packet);
7307 *offsets = dummy_tcp_packet_offsets;
7312 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7314 * @lkups: lookup elements or match criteria for the advanced recipe, one
7315 * structure per protocol header
7316 * @lkups_cnt: number of protocols
7317 * @s_rule: stores rule information from the match criteria
7318 * @dummy_pkt: dummy packet to fill according to filter match criteria
7319 * @pkt_len: packet length of dummy packet
7320 * @offsets: offset info for the dummy packet
7322 static enum ice_status
7323 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7324 struct ice_aqc_sw_rules_elem *s_rule,
7325 const u8 *dummy_pkt, u16 pkt_len,
7326 const struct ice_dummy_pkt_offsets *offsets)
7331 /* Start with a packet with a pre-defined/dummy content. Then, fill
7332 * in the header values to be looked up or matched.
7334 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7336 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7338 for (i = 0; i < lkups_cnt; i++) {
7339 enum ice_protocol_type type;
7340 u16 offset = 0, len = 0, j;
7343 /* find the start of this layer; it should be found since this
7344 * was already checked when search for the dummy packet
7346 type = lkups[i].type;
7347 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7348 if (type == offsets[j].type) {
7349 offset = offsets[j].offset;
7354 /* this should never happen in a correct calling sequence */
7356 return ICE_ERR_PARAM;
7358 switch (lkups[i].type) {
7361 len = sizeof(struct ice_ether_hdr);
7364 len = sizeof(struct ice_ethtype_hdr);
7367 len = sizeof(struct ice_vlan_hdr);
7371 len = sizeof(struct ice_ipv4_hdr);
7375 len = sizeof(struct ice_ipv6_hdr);
7380 len = sizeof(struct ice_l4_hdr);
7383 len = sizeof(struct ice_sctp_hdr);
7386 len = sizeof(struct ice_nvgre);
7391 len = sizeof(struct ice_udp_tnl_hdr);
7395 case ICE_GTP_NO_PAY:
7396 len = sizeof(struct ice_udp_gtp_hdr);
7399 len = sizeof(struct ice_pppoe_hdr);
7402 len = sizeof(struct ice_esp_hdr);
7405 len = sizeof(struct ice_nat_t_hdr);
7408 len = sizeof(struct ice_ah_hdr);
7411 len = sizeof(struct ice_l2tpv3_sess_hdr);
7414 return ICE_ERR_PARAM;
7417 /* the length should be a word multiple */
7418 if (len % ICE_BYTES_PER_WORD)
7421 /* We have the offset to the header start, the length, the
7422 * caller's header values and mask. Use this information to
7423 * copy the data into the dummy packet appropriately based on
7424 * the mask. Note that we need to only write the bits as
7425 * indicated by the mask to make sure we don't improperly write
7426 * over any significant packet data.
7428 for (j = 0; j < len / sizeof(u16); j++)
7429 if (((u16 *)&lkups[i].m_u)[j])
7430 ((u16 *)(pkt + offset))[j] =
7431 (((u16 *)(pkt + offset))[j] &
7432 ~((u16 *)&lkups[i].m_u)[j]) |
7433 (((u16 *)&lkups[i].h_u)[j] &
7434 ((u16 *)&lkups[i].m_u)[j]);
7437 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7443 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7444 * @hw: pointer to the hardware structure
7445 * @tun_type: tunnel type
7446 * @pkt: dummy packet to fill in
7447 * @offsets: offset info for the dummy packet
7449 static enum ice_status
7450 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7451 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7456 case ICE_SW_TUN_AND_NON_TUN:
7457 case ICE_SW_TUN_VXLAN_GPE:
7458 case ICE_SW_TUN_VXLAN:
7459 case ICE_SW_TUN_VXLAN_VLAN:
7460 case ICE_SW_TUN_UDP:
7461 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7465 case ICE_SW_TUN_GENEVE:
7466 case ICE_SW_TUN_GENEVE_VLAN:
7467 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7472 /* Nothing needs to be done for this tunnel type */
7476 /* Find the outer UDP protocol header and insert the port number */
7477 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7478 if (offsets[i].type == ICE_UDP_OF) {
7479 struct ice_l4_hdr *hdr;
7482 offset = offsets[i].offset;
7483 hdr = (struct ice_l4_hdr *)&pkt[offset];
7484 hdr->dst_port = CPU_TO_BE16(open_port);
7494 * ice_find_adv_rule_entry - Search a rule entry
7495 * @hw: pointer to the hardware structure
7496 * @lkups: lookup elements or match criteria for the advanced recipe, one
7497 * structure per protocol header
7498 * @lkups_cnt: number of protocols
7499 * @recp_id: recipe ID for which we are finding the rule
7500 * @rinfo: other information regarding the rule e.g. priority and action info
7502 * Helper function to search for a given advance rule entry
7503 * Returns pointer to entry storing the rule if found
7505 static struct ice_adv_fltr_mgmt_list_entry *
7506 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7507 u16 lkups_cnt, u16 recp_id,
7508 struct ice_adv_rule_info *rinfo)
7510 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7511 struct ice_switch_info *sw = hw->switch_info;
7514 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7515 ice_adv_fltr_mgmt_list_entry, list_entry) {
7516 bool lkups_matched = true;
7518 if (lkups_cnt != list_itr->lkups_cnt)
7520 for (i = 0; i < list_itr->lkups_cnt; i++)
7521 if (memcmp(&list_itr->lkups[i], &lkups[i],
7523 lkups_matched = false;
7526 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7527 rinfo->tun_type == list_itr->rule_info.tun_type &&
7535 * ice_adv_add_update_vsi_list
7536 * @hw: pointer to the hardware structure
7537 * @m_entry: pointer to current adv filter management list entry
7538 * @cur_fltr: filter information from the book keeping entry
7539 * @new_fltr: filter information with the new VSI to be added
7541 * Call AQ command to add or update previously created VSI list with new VSI.
7543 * Helper function to do book keeping associated with adding filter information
7544 * The algorithm to do the booking keeping is described below :
7545 * When a VSI needs to subscribe to a given advanced filter
7546 * if only one VSI has been added till now
7547 * Allocate a new VSI list and add two VSIs
7548 * to this list using switch rule command
7549 * Update the previously created switch rule with the
7550 * newly created VSI list ID
7551 * if a VSI list was previously created
7552 * Add the new VSI to the previously created VSI list set
7553 * using the update switch rule command
7555 static enum ice_status
7556 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7557 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7558 struct ice_adv_rule_info *cur_fltr,
7559 struct ice_adv_rule_info *new_fltr)
7561 enum ice_status status;
7562 u16 vsi_list_id = 0;
7564 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7565 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7566 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7567 return ICE_ERR_NOT_IMPL;
7569 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7570 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7571 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7572 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7573 return ICE_ERR_NOT_IMPL;
7575 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7576 /* Only one entry existed in the mapping and it was not already
7577 * a part of a VSI list. So, create a VSI list with the old and
7580 struct ice_fltr_info tmp_fltr;
7581 u16 vsi_handle_arr[2];
7583 /* A rule already exists with the new VSI being added */
7584 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7585 new_fltr->sw_act.fwd_id.hw_vsi_id)
7586 return ICE_ERR_ALREADY_EXISTS;
7588 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7589 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7590 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7596 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7597 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7598 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7599 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7600 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7601 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7603 /* Update the previous switch rule of "forward to VSI" to
7606 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7610 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7611 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7612 m_entry->vsi_list_info =
7613 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7616 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7618 if (!m_entry->vsi_list_info)
7621 /* A rule already exists with the new VSI being added */
7622 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7625 /* Update the previously created VSI list set with
7626 * the new VSI ID passed in
7628 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7630 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7632 ice_aqc_opc_update_sw_rules,
7634 /* update VSI list mapping info with new VSI ID */
7636 ice_set_bit(vsi_handle,
7637 m_entry->vsi_list_info->vsi_map);
7640 m_entry->vsi_count++;
7645 * ice_add_adv_rule - helper function to create an advanced switch rule
7646 * @hw: pointer to the hardware structure
7647 * @lkups: information on the words that needs to be looked up. All words
7648 * together makes one recipe
7649 * @lkups_cnt: num of entries in the lkups array
7650 * @rinfo: other information related to the rule that needs to be programmed
7651 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7652 * ignored is case of error.
7654 * This function can program only 1 rule at a time. The lkups is used to
7655 * describe the all the words that forms the "lookup" portion of the recipe.
7656 * These words can span multiple protocols. Callers to this function need to
7657 * pass in a list of protocol headers with lookup information along and mask
7658 * that determines which words are valid from the given protocol header.
7659 * rinfo describes other information related to this rule such as forwarding
7660 * IDs, priority of this rule, etc.
7663 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7664 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7665 struct ice_rule_query_data *added_entry)
7667 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7668 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7669 const struct ice_dummy_pkt_offsets *pkt_offsets;
7670 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7671 struct LIST_HEAD_TYPE *rule_head;
7672 struct ice_switch_info *sw;
7673 enum ice_status status;
7674 const u8 *pkt = NULL;
7680 /* Initialize profile to result index bitmap */
7681 if (!hw->switch_info->prof_res_bm_init) {
7682 hw->switch_info->prof_res_bm_init = 1;
7683 ice_init_prof_result_bm(hw);
7686 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7687 if (!prof_rule && !lkups_cnt)
7688 return ICE_ERR_PARAM;
7690 /* get # of words we need to match */
7692 for (i = 0; i < lkups_cnt; i++) {
7695 ptr = (u16 *)&lkups[i].m_u;
7696 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7702 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7703 return ICE_ERR_PARAM;
7705 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7706 return ICE_ERR_PARAM;
7709 /* make sure that we can locate a dummy packet */
7710 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7713 status = ICE_ERR_PARAM;
7714 goto err_ice_add_adv_rule;
7717 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7718 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7719 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7720 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7723 vsi_handle = rinfo->sw_act.vsi_handle;
7724 if (!ice_is_vsi_valid(hw, vsi_handle))
7725 return ICE_ERR_PARAM;
7727 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7728 rinfo->sw_act.fwd_id.hw_vsi_id =
7729 ice_get_hw_vsi_num(hw, vsi_handle);
7730 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7731 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7733 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7736 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7738 /* we have to add VSI to VSI_LIST and increment vsi_count.
7739 * Also Update VSI list so that we can change forwarding rule
7740 * if the rule already exists, we will check if it exists with
7741 * same vsi_id, if not then add it to the VSI list if it already
7742 * exists if not then create a VSI list and add the existing VSI
7743 * ID and the new VSI ID to the list
7744 * We will add that VSI to the list
7746 status = ice_adv_add_update_vsi_list(hw, m_entry,
7747 &m_entry->rule_info,
7750 added_entry->rid = rid;
7751 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7752 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7756 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7757 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7759 return ICE_ERR_NO_MEMORY;
7760 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7761 switch (rinfo->sw_act.fltr_act) {
7762 case ICE_FWD_TO_VSI:
7763 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7764 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7765 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7768 act |= ICE_SINGLE_ACT_TO_Q;
7769 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7770 ICE_SINGLE_ACT_Q_INDEX_M;
7772 case ICE_FWD_TO_QGRP:
7773 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7774 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7775 act |= ICE_SINGLE_ACT_TO_Q;
7776 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7777 ICE_SINGLE_ACT_Q_INDEX_M;
7778 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7779 ICE_SINGLE_ACT_Q_REGION_M;
7781 case ICE_DROP_PACKET:
7782 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7783 ICE_SINGLE_ACT_VALID_BIT;
7786 status = ICE_ERR_CFG;
7787 goto err_ice_add_adv_rule;
7790 /* set the rule LOOKUP type based on caller specified 'RX'
7791 * instead of hardcoding it to be either LOOKUP_TX/RX
7793 * for 'RX' set the source to be the port number
7794 * for 'TX' set the source to be the source HW VSI number (determined
7798 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7799 s_rule->pdata.lkup_tx_rx.src =
7800 CPU_TO_LE16(hw->port_info->lport);
7802 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7803 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7806 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7807 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7809 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7810 pkt_len, pkt_offsets);
7812 goto err_ice_add_adv_rule;
7814 if (rinfo->tun_type != ICE_NON_TUN &&
7815 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7816 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7817 s_rule->pdata.lkup_tx_rx.hdr,
7820 goto err_ice_add_adv_rule;
7823 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7824 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7827 goto err_ice_add_adv_rule;
7828 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7829 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7831 status = ICE_ERR_NO_MEMORY;
7832 goto err_ice_add_adv_rule;
7835 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7836 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7837 ICE_NONDMA_TO_NONDMA);
7838 if (!adv_fltr->lkups && !prof_rule) {
7839 status = ICE_ERR_NO_MEMORY;
7840 goto err_ice_add_adv_rule;
7843 adv_fltr->lkups_cnt = lkups_cnt;
7844 adv_fltr->rule_info = *rinfo;
7845 adv_fltr->rule_info.fltr_rule_id =
7846 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7847 sw = hw->switch_info;
7848 sw->recp_list[rid].adv_rule = true;
7849 rule_head = &sw->recp_list[rid].filt_rules;
7851 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7852 adv_fltr->vsi_count = 1;
7854 /* Add rule entry to book keeping list */
7855 LIST_ADD(&adv_fltr->list_entry, rule_head);
7857 added_entry->rid = rid;
7858 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7859 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7861 err_ice_add_adv_rule:
7862 if (status && adv_fltr) {
7863 ice_free(hw, adv_fltr->lkups);
7864 ice_free(hw, adv_fltr);
7867 ice_free(hw, s_rule);
7873 * ice_adv_rem_update_vsi_list
7874 * @hw: pointer to the hardware structure
7875 * @vsi_handle: VSI handle of the VSI to remove
7876 * @fm_list: filter management entry for which the VSI list management needs to
7879 static enum ice_status
7880 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7881 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7883 struct ice_vsi_list_map_info *vsi_list_info;
7884 enum ice_sw_lkup_type lkup_type;
7885 enum ice_status status;
7888 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7889 fm_list->vsi_count == 0)
7890 return ICE_ERR_PARAM;
7892 /* A rule with the VSI being removed does not exist */
7893 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7894 return ICE_ERR_DOES_NOT_EXIST;
7896 lkup_type = ICE_SW_LKUP_LAST;
7897 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7898 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7899 ice_aqc_opc_update_sw_rules,
7904 fm_list->vsi_count--;
7905 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7906 vsi_list_info = fm_list->vsi_list_info;
7907 if (fm_list->vsi_count == 1) {
7908 struct ice_fltr_info tmp_fltr;
7911 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7913 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7914 return ICE_ERR_OUT_OF_RANGE;
7916 /* Make sure VSI list is empty before removing it below */
7917 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7919 ice_aqc_opc_update_sw_rules,
7924 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7925 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7926 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7927 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7928 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7929 tmp_fltr.fwd_id.hw_vsi_id =
7930 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7931 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7932 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7933 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7935 /* Update the previous switch rule of "MAC forward to VSI" to
7936 * "MAC fwd to VSI list"
7938 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7940 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7941 tmp_fltr.fwd_id.hw_vsi_id, status);
7944 fm_list->vsi_list_info->ref_cnt--;
7946 /* Remove the VSI list since it is no longer used */
7947 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7949 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
7950 vsi_list_id, status);
7954 LIST_DEL(&vsi_list_info->list_entry);
7955 ice_free(hw, vsi_list_info);
7956 fm_list->vsi_list_info = NULL;
7963 * ice_rem_adv_rule - removes existing advanced switch rule
7964 * @hw: pointer to the hardware structure
7965 * @lkups: information on the words that needs to be looked up. All words
7966 * together makes one recipe
7967 * @lkups_cnt: num of entries in the lkups array
7968 * @rinfo: Its the pointer to the rule information for the rule
7970 * This function can be used to remove 1 rule at a time. The lkups is
7971 * used to describe all the words that forms the "lookup" portion of the
7972 * rule. These words can span multiple protocols. Callers to this function
7973 * need to pass in a list of protocol headers with lookup information along
7974 * and mask that determines which words are valid from the given protocol
7975 * header. rinfo describes other information related to this rule such as
7976 * forwarding IDs, priority of this rule, etc.
7979 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7980 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7982 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7983 struct ice_prot_lkup_ext lkup_exts;
7984 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7985 enum ice_status status = ICE_SUCCESS;
7986 bool remove_rule = false;
7987 u16 i, rid, vsi_handle;
/* Translate the caller's protocol headers into valid lookup words so the
 * matching recipe can be located.
 */
7989 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7990 for (i = 0; i < lkups_cnt; i++) {
7993 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7996 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8001 /* Create any special protocol/offset pairs, such as looking at tunnel
8002 * bits by extracting metadata
8004 status = ice_add_special_words(rinfo, &lkup_exts);
/* Look up the recipe previously created for this lookup/tunnel combination */
8008 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8009 /* If did not find a recipe that match the existing criteria */
8010 if (rid == ICE_MAX_NUM_RECIPES)
8011 return ICE_ERR_PARAM;
8013 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8014 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8015 /* the rule is already removed */
/* Under the recipe's filter lock, decide whether to delete the whole HW
 * rule or only detach this VSI from the rule's VSI list.
 */
8018 ice_acquire_lock(rule_lock);
8019 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8021 } else if (list_elem->vsi_count > 1) {
/* Multiple VSIs still reference the rule: keep it, only update the list */
8022 remove_rule = false;
8023 vsi_handle = rinfo->sw_act.vsi_handle;
8024 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8026 vsi_handle = rinfo->sw_act.vsi_handle;
8027 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8029 ice_release_lock(rule_lock);
8032 if (list_elem->vsi_count == 0)
8035 ice_release_lock(rule_lock);
/* Build and send a remove-switch-rule AQ command addressed by the rule's
 * HW filter index.
 */
8037 struct ice_aqc_sw_rules_elem *s_rule;
8040 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8042 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
8045 return ICE_ERR_NO_MEMORY;
8046 s_rule->pdata.lkup_tx_rx.act = 0;
8047 s_rule->pdata.lkup_tx_rx.index =
8048 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8049 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8050 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8052 ice_aqc_opc_remove_sw_rules, NULL);
/* ICE_ERR_DOES_NOT_EXIST means the rule is already gone in HW; still
 * release the SW bookkeeping entry so the lists stay consistent.
 */
8053 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8054 struct ice_switch_info *sw = hw->switch_info;
8056 ice_acquire_lock(rule_lock);
8057 LIST_DEL(&list_elem->list_entry);
8058 ice_free(hw, list_elem->lkups);
8059 ice_free(hw, list_elem);
8060 ice_release_lock(rule_lock);
/* Clear the adv_rule flag once the recipe holds no more filters */
8061 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8062 sw->recp_list[rid].adv_rule = false;
8064 ice_free(hw, s_rule);
8070 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8071 * @hw: pointer to the hardware structure
8072 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8074 * This function is used to remove 1 rule at a time. The removal is based on
8075 * the remove_entry parameter. This function will remove rule for a given
8076 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8079 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8080 struct ice_rule_query_data *remove_entry)
8082 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8083 struct LIST_HEAD_TYPE *list_head;
8084 struct ice_adv_rule_info rinfo;
8085 struct ice_switch_info *sw;
8087 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can exist */
8088 if (!sw->recp_list[remove_entry->rid].recp_created)
8089 return ICE_ERR_PARAM;
8090 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Walk the recipe's filter list for a matching rule ID, then delegate the
 * actual removal to ice_rem_adv_rule() using the stored lookup words.
 */
8091 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8093 if (list_itr->rule_info.fltr_rule_id ==
8094 remove_entry->rule_id) {
8095 rinfo = list_itr->rule_info;
8096 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8097 return ice_rem_adv_rule(hw, list_itr->lkups,
8098 list_itr->lkups_cnt, &rinfo);
8101 /* either list is empty or unable to find rule */
8102 return ICE_ERR_DOES_NOT_EXIST;
8106 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8108 * @hw: pointer to the hardware structure
8109 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8111 * This function is used to remove all the rules for a given VSI and as soon
8112 * as removing a rule fails, it will return immediately with the error code,
8113 * else it will return ICE_SUCCESS
8115 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8117 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8118 struct ice_vsi_list_map_info *map_info;
8119 struct LIST_HEAD_TYPE *list_head;
8120 struct ice_adv_rule_info rinfo;
8121 struct ice_switch_info *sw;
8122 enum ice_status status;
8125 sw = hw->switch_info;
/* Scan every recipe; only created recipes carrying advanced rules matter */
8126 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8127 if (!sw->recp_list[rid].recp_created)
8129 if (!sw->recp_list[rid].adv_rule)
8132 list_head = &sw->recp_list[rid].filt_rules;
/* Safe iteration: ice_rem_adv_rule() deletes entries while we walk */
8133 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8134 ice_adv_fltr_mgmt_list_entry,
8136 rinfo = list_itr->rule_info;
/* For VSI-list rules, this VSI must be set in the list's bitmap;
 * for direct-forward rules the handle must match exactly.
 */
8138 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8139 map_info = list_itr->vsi_list_info;
8143 if (!ice_is_bit_set(map_info->vsi_map,
8146 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8150 rinfo.sw_act.vsi_handle = vsi_handle;
8151 status = ice_rem_adv_rule(hw, list_itr->lkups,
8152 list_itr->lkups_cnt, &rinfo);
8162 * ice_replay_fltr - Replay all the filters stored by a specific list head
8163 * @hw: pointer to the hardware structure
8164 * @list_head: list for which filters needs to be replayed
8165 * @recp_id: Recipe ID for which rules need to be replayed
8167 static enum ice_status
8168 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8170 struct ice_fltr_mgmt_list_entry *itr;
8171 enum ice_status status = ICE_SUCCESS;
8172 struct ice_sw_recipe *recp_list;
8173 u8 lport = hw->port_info->lport;
8174 struct LIST_HEAD_TYPE l_head;
8176 if (LIST_EMPTY(list_head))
8179 recp_list = &hw->switch_info->recp_list[recp_id];
8180 /* Move entries from the given list_head to a temporary l_head so that
8181 * they can be replayed. Otherwise when trying to re-add the same
8182 * filter, the function will return already exists
8184 LIST_REPLACE_INIT(list_head, &l_head);
8186 /* Mark the given list_head empty by reinitializing it so filters
8187 * could be added again by *handler
8189 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8191 struct ice_fltr_list_entry f_entry;
8194 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN rules can be re-added through the generic path */
8195 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8196 status = ice_add_rule_internal(hw, recp_list, lport,
8198 if (status != ICE_SUCCESS)
8203 /* Add a filter per VSI separately */
8204 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8206 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the add path can rebuild the VSI-list membership */
8209 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8210 f_entry.fltr_info.vsi_handle = vsi_handle;
8211 f_entry.fltr_info.fwd_id.hw_vsi_id =
8212 ice_get_hw_vsi_num(hw, vsi_handle);
8213 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN rules require the VLAN-specific add routine */
8214 if (recp_id == ICE_SW_LKUP_VLAN)
8215 status = ice_add_vlan_internal(hw, recp_list,
8218 status = ice_add_rule_internal(hw, recp_list,
8221 if (status != ICE_SUCCESS)
8226 /* Clear the filter management list */
8227 ice_rem_sw_rule_info(hw, &l_head);
8232 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8233 * @hw: pointer to the hardware structure
8235 * NOTE: This function does not clean up partially added filters on error.
8236 * It is up to caller of the function to issue a reset or fail early.
8238 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8240 struct ice_switch_info *sw = hw->switch_info;
8241 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list in order; stop on the first failure */
8244 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8245 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8247 status = ice_replay_fltr(hw, i, head);
8248 if (status != ICE_SUCCESS)
8255 * ice_replay_vsi_fltr - Replay filters for requested VSI
8256 * @hw: pointer to the hardware structure
8257 * @pi: pointer to port information structure
8258 * @sw: pointer to switch info struct for which function replays filters
8259 * @vsi_handle: driver VSI handle
8260 * @recp_id: Recipe ID for which rules need to be replayed
8261 * @list_head: list for which filters need to be replayed
8263 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8264 * It is required to pass valid VSI handle.
8266 static enum ice_status
8267 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8268 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8269 struct LIST_HEAD_TYPE *list_head)
8271 struct ice_fltr_mgmt_list_entry *itr;
8272 enum ice_status status = ICE_SUCCESS;
8273 struct ice_sw_recipe *recp_list;
8276 if (LIST_EMPTY(list_head))
8278 recp_list = &sw->recp_list[recp_id];
8279 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8281 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8283 struct ice_fltr_list_entry f_entry;
8285 f_entry.fltr_info = itr->fltr_info;
/* Direct replay path: single-VSI, non-VLAN rule owned by this VSI */
8286 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8287 itr->fltr_info.vsi_handle == vsi_handle) {
8288 /* update the src in case it is VSI num */
8289 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8290 f_entry.fltr_info.src = hw_vsi_id;
8291 status = ice_add_rule_internal(hw, recp_list,
8294 if (status != ICE_SUCCESS)
/* VSI-list path: skip rules whose list does not contain this VSI */
8298 if (!itr->vsi_list_info ||
8299 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8301 /* Clearing it so that the logic can add it back */
8302 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8303 f_entry.fltr_info.vsi_handle = vsi_handle;
8304 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8305 /* update the src in case it is VSI num */
8306 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8307 f_entry.fltr_info.src = hw_vsi_id;
8308 if (recp_id == ICE_SW_LKUP_VLAN)
8309 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8311 status = ice_add_rule_internal(hw, recp_list,
8314 if (status != ICE_SUCCESS)
8322 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8323 * @hw: pointer to the hardware structure
8324 * @vsi_handle: driver VSI handle
8325 * @list_head: list for which filters need to be replayed
8327 * Replay the advanced rule for the given VSI.
8329 static enum ice_status
8330 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8331 struct LIST_HEAD_TYPE *list_head)
8333 struct ice_rule_query_data added_entry = { 0 };
8334 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8335 enum ice_status status = ICE_SUCCESS;
8337 if (LIST_EMPTY(list_head))
/* Re-add each saved advanced rule that targets this VSI; rules bound to
 * other VSIs are skipped.
 */
8339 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8341 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8342 u16 lk_cnt = adv_fltr->lkups_cnt;
8344 if (vsi_handle != rinfo->sw_act.vsi_handle)
8346 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8355 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8356 * @hw: pointer to the hardware structure
8357 * @pi: pointer to port information structure
8358 * @vsi_handle: driver VSI handle
8360 * Replays filters for requested VSI via vsi_handle.
8363 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8366 struct ice_switch_info *sw = hw->switch_info;
8367 enum ice_status status;
8370 /* Update the recipes that were created */
8371 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8372 struct LIST_HEAD_TYPE *head;
8374 head = &sw->recp_list[i].filt_replay_rules;
/* Dispatch per recipe type: basic lookup rules vs advanced rules */
8375 if (!sw->recp_list[i].adv_rule)
8376 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8379 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8380 if (status != ICE_SUCCESS)
8388 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8389 * @hw: pointer to the HW struct
8390 * @sw: pointer to switch info struct for which function removes filters
8392 * Deletes the filter replay rules for given switch
8394 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8401 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8402 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8403 struct LIST_HEAD_TYPE *l_head;
8405 l_head = &sw->recp_list[i].filt_replay_rules;
/* Use the matching teardown routine for basic vs advanced rule lists */
8406 if (!sw->recp_list[i].adv_rule)
8407 ice_rem_sw_rule_info(hw, l_head);
8409 ice_rem_adv_rule_info(hw, l_head);
8415 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8416 * @hw: pointer to the HW struct
8418 * Deletes the filter replay rules.
8420 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin wrapper: remove replay rules for the HW's own switch_info */
8422 ice_rm_sw_replay_rule_info(hw, hw->switch_info);