1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
508 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
510 { ICE_IPV4_OFOS, 14 },
515 { ICE_PROTOCOL_LAST, 0 },
518 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
519 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
524 0x45, 0x00, 0x00, 0x58, /* IP 14 */
525 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x11, 0x00, 0x00,
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
530 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
531 0x00, 0x44, 0x00, 0x00,
533 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
534 0x00, 0x00, 0x00, 0x00,
535 0x00, 0x00, 0x00, 0x85,
537 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
538 0x00, 0x00, 0x00, 0x00,
540 0x45, 0x00, 0x00, 0x28, /* IP 62 */
541 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x06, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
546 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x00,
549 0x50, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, /* 2 bytes for 4 byte alignment */
555 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
556 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
558 { ICE_IPV4_OFOS, 14 },
562 { ICE_UDP_ILOS, 82 },
563 { ICE_PROTOCOL_LAST, 0 },
566 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
567 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
568 0x00, 0x00, 0x00, 0x00,
569 0x00, 0x00, 0x00, 0x00,
572 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x11, 0x00, 0x00,
575 0x00, 0x00, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
579 0x00, 0x38, 0x00, 0x00,
581 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x85,
585 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
586 0x00, 0x00, 0x00, 0x00,
588 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x11, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
595 0x00, 0x08, 0x00, 0x00,
597 0x00, 0x00, /* 2 bytes for 4 byte alignment */
600 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
601 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
603 { ICE_IPV4_OFOS, 14 },
608 { ICE_PROTOCOL_LAST, 0 },
611 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
612 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
617 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x11, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
624 0x00, 0x58, 0x00, 0x00,
626 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
627 0x00, 0x00, 0x00, 0x00,
628 0x00, 0x00, 0x00, 0x85,
630 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
631 0x00, 0x00, 0x00, 0x00,
633 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
634 0x00, 0x14, 0x06, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x50, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, /* 2 bytes for 4 byte alignment */
653 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
655 { ICE_IPV4_OFOS, 14 },
659 { ICE_UDP_ILOS, 102 },
660 { ICE_PROTOCOL_LAST, 0 },
663 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
669 0x45, 0x00, 0x00, 0x60, /* IP 14 */
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x11, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
676 0x00, 0x4c, 0x00, 0x00,
678 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x85,
682 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
683 0x00, 0x00, 0x00, 0x00,
685 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
686 0x00, 0x08, 0x11, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
697 0x00, 0x08, 0x00, 0x00,
699 0x00, 0x00, /* 2 bytes for 4 byte alignment */
702 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
704 { ICE_IPV6_OFOS, 14 },
709 { ICE_PROTOCOL_LAST, 0 },
712 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
713 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
718 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
719 0x00, 0x44, 0x11, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
730 0x00, 0x44, 0x00, 0x00,
732 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
733 0x00, 0x00, 0x00, 0x00,
734 0x00, 0x00, 0x00, 0x85,
736 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
737 0x00, 0x00, 0x00, 0x00,
739 0x45, 0x00, 0x00, 0x28, /* IP 82 */
740 0x00, 0x00, 0x00, 0x00,
741 0x00, 0x06, 0x00, 0x00,
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x50, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, /* 2 bytes for 4 byte alignment */
754 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
756 { ICE_IPV6_OFOS, 14 },
760 { ICE_UDP_ILOS, 102 },
761 { ICE_PROTOCOL_LAST, 0 },
764 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
765 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
770 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
771 0x00, 0x38, 0x11, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
781 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
782 0x00, 0x38, 0x00, 0x00,
784 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
785 0x00, 0x00, 0x00, 0x00,
786 0x00, 0x00, 0x00, 0x85,
788 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
789 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x11, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
798 0x00, 0x08, 0x00, 0x00,
800 0x00, 0x00, /* 2 bytes for 4 byte alignment */
803 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
805 { ICE_IPV6_OFOS, 14 },
810 { ICE_PROTOCOL_LAST, 0 },
813 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
814 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
819 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
820 0x00, 0x58, 0x11, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
830 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
831 0x00, 0x58, 0x00, 0x00,
833 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
834 0x00, 0x00, 0x00, 0x00,
835 0x00, 0x00, 0x00, 0x85,
837 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
838 0x00, 0x00, 0x00, 0x00,
840 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
841 0x00, 0x14, 0x06, 0x00,
842 0x00, 0x00, 0x00, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x50, 0x00, 0x00, 0x00,
855 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, /* 2 bytes for 4 byte alignment */
860 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
862 { ICE_IPV6_OFOS, 14 },
866 { ICE_UDP_ILOS, 102 },
867 { ICE_PROTOCOL_LAST, 0 },
870 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
871 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
876 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
877 0x00, 0x4c, 0x11, 0x00,
878 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
888 0x00, 0x4c, 0x00, 0x00,
890 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
891 0x00, 0x00, 0x00, 0x00,
892 0x00, 0x00, 0x00, 0x85,
894 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
895 0x00, 0x00, 0x00, 0x00,
897 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
898 0x00, 0x08, 0x11, 0x00,
899 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
908 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
909 0x00, 0x08, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 byte alignment */
914 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
916 { ICE_IPV4_OFOS, 14 },
919 { ICE_PROTOCOL_LAST, 0 },
922 static const u8 dummy_udp_gtp_packet[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
929 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x11, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
935 0x00, 0x1c, 0x00, 0x00,
937 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
938 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x00, 0x85,
941 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
942 0x00, 0x00, 0x00, 0x00,
946 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
948 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
955 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x11, 0x00, 0x00,
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
968 0x00, 0x00, 0x00, 0x00,
970 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x85,
974 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
975 0x00, 0x00, 0x00, 0x00,
977 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
978 0x00, 0x00, 0x40, 0x00,
979 0x40, 0x00, 0x00, 0x00,
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
986 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
988 { ICE_IPV4_OFOS, 14 },
992 { ICE_PROTOCOL_LAST, 0 },
995 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
996 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
997 0x00, 0x00, 0x00, 0x00,
998 0x00, 0x00, 0x00, 0x00,
1001 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
1002 0x00, 0x00, 0x40, 0x00,
1003 0x40, 0x11, 0x00, 0x00,
1004 0x00, 0x00, 0x00, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1007 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1008 0x00, 0x00, 0x00, 0x00,
1010 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x85,
1014 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1015 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1018 0x00, 0x00, 0x3b, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1032 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1033 { ICE_MAC_OFOS, 0 },
1034 { ICE_IPV6_OFOS, 14 },
1037 { ICE_IPV4_IL, 82 },
1038 { ICE_PROTOCOL_LAST, 0 },
1041 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1042 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1043 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00,
1047 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1048 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00,
1058 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1059 0x00, 0x00, 0x00, 0x00,
1061 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1062 0x00, 0x00, 0x00, 0x00,
1063 0x00, 0x00, 0x00, 0x85,
1065 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1066 0x00, 0x00, 0x00, 0x00,
1068 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1069 0x00, 0x00, 0x40, 0x00,
1070 0x40, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1072 0x00, 0x00, 0x00, 0x00,
1078 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1079 { ICE_MAC_OFOS, 0 },
1080 { ICE_IPV6_OFOS, 14 },
1083 { ICE_IPV6_IL, 82 },
1084 { ICE_PROTOCOL_LAST, 0 },
1087 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1088 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1093 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1094 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1095 0x00, 0x00, 0x00, 0x00,
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1104 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1105 0x00, 0x00, 0x00, 0x00,
1107 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x85,
1111 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1112 0x00, 0x00, 0x00, 0x00,
1114 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1115 0x00, 0x00, 0x3b, 0x00,
1116 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x00, 0x00,
1118 0x00, 0x00, 0x00, 0x00,
1119 0x00, 0x00, 0x00, 0x00,
1120 0x00, 0x00, 0x00, 0x00,
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x00,
1123 0x00, 0x00, 0x00, 0x00,
1129 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1130 { ICE_MAC_OFOS, 0 },
1131 { ICE_IPV4_OFOS, 14 },
1133 { ICE_GTP_NO_PAY, 42 },
1134 { ICE_PROTOCOL_LAST, 0 },
1138 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1139 { ICE_MAC_OFOS, 0 },
1140 { ICE_IPV6_OFOS, 14 },
1142 { ICE_GTP_NO_PAY, 62 },
1143 { ICE_PROTOCOL_LAST, 0 },
1146 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1147 { ICE_MAC_OFOS, 0 },
1148 { ICE_ETYPE_OL, 12 },
1149 { ICE_VLAN_OFOS, 14},
1151 { ICE_PROTOCOL_LAST, 0 },
1154 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1155 { ICE_MAC_OFOS, 0 },
1156 { ICE_ETYPE_OL, 12 },
1157 { ICE_VLAN_OFOS, 14},
1159 { ICE_IPV4_OFOS, 26 },
1160 { ICE_PROTOCOL_LAST, 0 },
1163 static const u8 dummy_pppoe_ipv4_packet[] = {
1164 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1165 0x00, 0x00, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1168 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1170 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1172 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1175 0x00, 0x21, /* PPP Link Layer 24 */
1177 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1183 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1187 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1188 { ICE_MAC_OFOS, 0 },
1189 { ICE_ETYPE_OL, 12 },
1190 { ICE_VLAN_OFOS, 14},
1192 { ICE_IPV4_OFOS, 26 },
1194 { ICE_PROTOCOL_LAST, 0 },
1197 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1198 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1199 0x00, 0x00, 0x00, 0x00,
1200 0x00, 0x00, 0x00, 0x00,
1202 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1204 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1206 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1209 0x00, 0x21, /* PPP Link Layer 24 */
1211 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1212 0x00, 0x01, 0x00, 0x00,
1213 0x00, 0x06, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00,
1217 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1218 0x00, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x00, 0x00,
1220 0x50, 0x00, 0x00, 0x00,
1221 0x00, 0x00, 0x00, 0x00,
1223 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1227 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1228 { ICE_MAC_OFOS, 0 },
1229 { ICE_ETYPE_OL, 12 },
1230 { ICE_VLAN_OFOS, 14},
1232 { ICE_IPV4_OFOS, 26 },
1233 { ICE_UDP_ILOS, 46 },
1234 { ICE_PROTOCOL_LAST, 0 },
1237 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1239 0x00, 0x00, 0x00, 0x00,
1240 0x00, 0x00, 0x00, 0x00,
1242 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1244 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1246 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1249 0x00, 0x21, /* PPP Link Layer 24 */
1251 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1252 0x00, 0x01, 0x00, 0x00,
1253 0x00, 0x11, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1257 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1258 0x00, 0x08, 0x00, 0x00,
1260 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1263 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1264 { ICE_MAC_OFOS, 0 },
1265 { ICE_ETYPE_OL, 12 },
1266 { ICE_VLAN_OFOS, 14},
1268 { ICE_IPV6_OFOS, 26 },
1269 { ICE_PROTOCOL_LAST, 0 },
1272 static const u8 dummy_pppoe_ipv6_packet[] = {
1273 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1274 0x00, 0x00, 0x00, 0x00,
1275 0x00, 0x00, 0x00, 0x00,
1277 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1279 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1281 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1284 0x00, 0x57, /* PPP Link Layer 24 */
1286 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1287 0x00, 0x00, 0x3b, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x00, 0x00,
1297 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1301 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1302 { ICE_MAC_OFOS, 0 },
1303 { ICE_ETYPE_OL, 12 },
1304 { ICE_VLAN_OFOS, 14},
1306 { ICE_IPV6_OFOS, 26 },
1308 { ICE_PROTOCOL_LAST, 0 },
1311 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1312 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1313 0x00, 0x00, 0x00, 0x00,
1314 0x00, 0x00, 0x00, 0x00,
1316 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1318 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1320 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1323 0x00, 0x57, /* PPP Link Layer 24 */
1325 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1326 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1334 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1337 0x00, 0x00, 0x00, 0x00,
1338 0x00, 0x00, 0x00, 0x00,
1339 0x50, 0x00, 0x00, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1346 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1347 { ICE_MAC_OFOS, 0 },
1348 { ICE_ETYPE_OL, 12 },
1349 { ICE_VLAN_OFOS, 14},
1351 { ICE_IPV6_OFOS, 26 },
1352 { ICE_UDP_ILOS, 66 },
1353 { ICE_PROTOCOL_LAST, 0 },
1356 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1357 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1358 0x00, 0x00, 0x00, 0x00,
1359 0x00, 0x00, 0x00, 0x00,
1361 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1363 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1365 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1368 0x00, 0x57, /* PPP Link Layer 24 */
1370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1371 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1381 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1382 0x00, 0x08, 0x00, 0x00,
1384 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1387 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1388 { ICE_MAC_OFOS, 0 },
1389 { ICE_IPV4_OFOS, 14 },
1391 { ICE_PROTOCOL_LAST, 0 },
1394 static const u8 dummy_ipv4_esp_pkt[] = {
1395 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1396 0x00, 0x00, 0x00, 0x00,
1397 0x00, 0x00, 0x00, 0x00,
1400 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1401 0x00, 0x00, 0x40, 0x00,
1402 0x40, 0x32, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1404 0x00, 0x00, 0x00, 0x00,
1406 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1407 0x00, 0x00, 0x00, 0x00,
1408 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1411 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1412 { ICE_MAC_OFOS, 0 },
1413 { ICE_IPV6_OFOS, 14 },
1415 { ICE_PROTOCOL_LAST, 0 },
1418 static const u8 dummy_ipv6_esp_pkt[] = {
1419 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1420 0x00, 0x00, 0x00, 0x00,
1421 0x00, 0x00, 0x00, 0x00,
1424 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1425 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1433 0x00, 0x00, 0x00, 0x00,
1435 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1436 0x00, 0x00, 0x00, 0x00,
1437 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1440 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1441 { ICE_MAC_OFOS, 0 },
1442 { ICE_IPV4_OFOS, 14 },
1444 { ICE_PROTOCOL_LAST, 0 },
1447 static const u8 dummy_ipv4_ah_pkt[] = {
1448 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1449 0x00, 0x00, 0x00, 0x00,
1450 0x00, 0x00, 0x00, 0x00,
1453 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1454 0x00, 0x00, 0x40, 0x00,
1455 0x40, 0x33, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1457 0x00, 0x00, 0x00, 0x00,
1459 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, 0x00, 0x00,
1462 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1465 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1466 { ICE_MAC_OFOS, 0 },
1467 { ICE_IPV6_OFOS, 14 },
1469 { ICE_PROTOCOL_LAST, 0 },
1472 static const u8 dummy_ipv6_ah_pkt[] = {
1473 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1474 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00,
1478 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1479 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1487 0x00, 0x00, 0x00, 0x00,
1489 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, 0x00, 0x00,
1492 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1495 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1496 { ICE_MAC_OFOS, 0 },
1497 { ICE_IPV4_OFOS, 14 },
1498 { ICE_UDP_ILOS, 34 },
1500 { ICE_PROTOCOL_LAST, 0 },
1503 static const u8 dummy_ipv4_nat_pkt[] = {
1504 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1505 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00,
1509 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1510 0x00, 0x00, 0x40, 0x00,
1511 0x40, 0x11, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1513 0x00, 0x00, 0x00, 0x00,
1515 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1516 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, 0x00, 0x00,
1520 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1523 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1524 { ICE_MAC_OFOS, 0 },
1525 { ICE_IPV6_OFOS, 14 },
1526 { ICE_UDP_ILOS, 54 },
1528 { ICE_PROTOCOL_LAST, 0 },
1531 static const u8 dummy_ipv6_nat_pkt[] = {
1532 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1533 0x00, 0x00, 0x00, 0x00,
1534 0x00, 0x00, 0x00, 0x00,
1537 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1538 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1546 0x00, 0x00, 0x00, 0x00,
1548 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1549 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1557 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1558 { ICE_MAC_OFOS, 0 },
1559 { ICE_IPV4_OFOS, 14 },
1561 { ICE_PROTOCOL_LAST, 0 },
1564 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1565 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1566 0x00, 0x00, 0x00, 0x00,
1567 0x00, 0x00, 0x00, 0x00,
1570 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1571 0x00, 0x00, 0x40, 0x00,
1572 0x40, 0x73, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1574 0x00, 0x00, 0x00, 0x00,
1576 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, 0x00, 0x00,
1579 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1582 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1583 { ICE_MAC_OFOS, 0 },
1584 { ICE_IPV6_OFOS, 14 },
1586 { ICE_PROTOCOL_LAST, 0 },
1589 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1590 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1591 0x00, 0x00, 0x00, 0x00,
1592 0x00, 0x00, 0x00, 0x00,
1595 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1596 0x00, 0x0c, 0x73, 0x40,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1604 0x00, 0x00, 0x00, 0x00,
1606 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, 0x00, 0x00,
1609 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1612 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1613 { ICE_MAC_OFOS, 0 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1626 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1627 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1629 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1630 0x00, 0x01, 0x00, 0x00,
1631 0x00, 0x11, 0x00, 0x00,
1632 0x00, 0x00, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1635 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1636 0x00, 0x08, 0x00, 0x00,
1638 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1641 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1642 { ICE_MAC_OFOS, 0 },
1643 { ICE_VLAN_EX, 14 },
1644 { ICE_VLAN_OFOS, 18 },
1645 { ICE_IPV6_OFOS, 22 },
1646 { ICE_PROTOCOL_LAST, 0 },
1649 static const u8 dummy_qinq_ipv6_pkt[] = {
1650 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1651 0x00, 0x00, 0x00, 0x00,
1652 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1656 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1658 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1659 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1660 0x00, 0x00, 0x00, 0x00,
1661 0x00, 0x00, 0x00, 0x00,
1662 0x00, 0x00, 0x00, 0x00,
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1670 0x00, 0x10, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1673 0x00, 0x00, 0x00, 0x00,
1675 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1678 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1679 { ICE_MAC_OFOS, 0 },
1680 { ICE_VLAN_EX, 14 },
1681 { ICE_VLAN_OFOS, 18 },
1683 { ICE_PROTOCOL_LAST, 0 },
1687 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1688 { ICE_MAC_OFOS, 0 },
1689 { ICE_VLAN_EX, 14 },
1690 { ICE_VLAN_OFOS, 18 },
1692 { ICE_IPV4_OFOS, 30 },
1693 { ICE_PROTOCOL_LAST, 0 },
1696 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1697 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1698 0x00, 0x00, 0x00, 0x00,
1699 0x00, 0x00, 0x00, 0x00,
1702 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1703 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1705 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1708 0x00, 0x21, /* PPP Link Layer 28 */
1710 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1711 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00,
1713 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00,
1716 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1720 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1721 { ICE_MAC_OFOS, 0 },
1722 { ICE_ETYPE_OL, 12 },
1724 { ICE_VLAN_OFOS, 18 },
1726 { ICE_IPV6_OFOS, 30 },
1727 { ICE_PROTOCOL_LAST, 0 },
1730 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1731 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1732 0x00, 0x00, 0x00, 0x00,
1733 0x00, 0x00, 0x00, 0x00,
1735 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1737 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1738 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1740 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1743 0x00, 0x57, /* PPP Link Layer 28*/
1745 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1746 0x00, 0x00, 0x3b, 0x00,
1747 0x00, 0x00, 0x00, 0x00,
1748 0x00, 0x00, 0x00, 0x00,
1749 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00,
1751 0x00, 0x00, 0x00, 0x00,
1752 0x00, 0x00, 0x00, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1759 /* this is a recipe to profile association bitmap */
1760 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1761 ICE_MAX_NUM_PROFILES);
1763 /* this is a profile to recipe association bitmap */
1764 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1765 ICE_MAX_NUM_RECIPES);
1767 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1770 * ice_collect_result_idx - copy result index values
1771 * @buf: buffer that contains the result index
1772 * @recp: the recipe struct to copy data into
1774 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1775 struct ice_sw_recipe *recp)
1777 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1778 ice_set_bit(buf->content.result_indx &
1779 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1783 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1784 * @rid: recipe ID that we are populating
1786 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1788 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1789 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1790 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1791 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1792 enum ice_sw_tunnel_type tun_type;
1793 u16 i, j, profile_num = 0;
1794 bool non_tun_valid = false;
1795 bool pppoe_valid = false;
1796 bool vxlan_valid = false;
1797 bool gre_valid = false;
1798 bool gtp_valid = false;
1799 bool flag_valid = false;
1801 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1802 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1807 for (i = 0; i < 12; i++) {
1808 if (gre_profile[i] == j)
1812 for (i = 0; i < 12; i++) {
1813 if (vxlan_profile[i] == j)
1817 for (i = 0; i < 7; i++) {
1818 if (pppoe_profile[i] == j)
1822 for (i = 0; i < 6; i++) {
1823 if (non_tun_profile[i] == j)
1824 non_tun_valid = true;
1827 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1828 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1831 if ((j >= ICE_PROFID_IPV4_ESP &&
1832 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1833 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1834 j <= ICE_PROFID_IPV6_GTPU_TEID))
1838 if (!non_tun_valid && vxlan_valid)
1839 tun_type = ICE_SW_TUN_VXLAN;
1840 else if (!non_tun_valid && gre_valid)
1841 tun_type = ICE_SW_TUN_NVGRE;
1842 else if (!non_tun_valid && pppoe_valid)
1843 tun_type = ICE_SW_TUN_PPPOE;
1844 else if (!non_tun_valid && gtp_valid)
1845 tun_type = ICE_SW_TUN_GTP;
1846 else if (non_tun_valid &&
1847 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1848 tun_type = ICE_SW_TUN_AND_NON_TUN;
1849 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1851 tun_type = ICE_NON_TUN;
1853 tun_type = ICE_NON_TUN;
1855 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1856 i = ice_is_bit_set(recipe_to_profile[rid],
1857 ICE_PROFID_PPPOE_IPV4_OTHER);
1858 j = ice_is_bit_set(recipe_to_profile[rid],
1859 ICE_PROFID_PPPOE_IPV6_OTHER);
1861 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1863 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1866 if (tun_type == ICE_SW_TUN_GTP) {
1867 if (ice_is_bit_set(recipe_to_profile[rid],
1868 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1869 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1870 else if (ice_is_bit_set(recipe_to_profile[rid],
1871 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1872 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1873 else if (ice_is_bit_set(recipe_to_profile[rid],
1874 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1875 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1876 else if (ice_is_bit_set(recipe_to_profile[rid],
1877 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1878 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1881 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1882 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1883 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1885 case ICE_PROFID_IPV4_TCP:
1886 tun_type = ICE_SW_IPV4_TCP;
1888 case ICE_PROFID_IPV4_UDP:
1889 tun_type = ICE_SW_IPV4_UDP;
1891 case ICE_PROFID_IPV6_TCP:
1892 tun_type = ICE_SW_IPV6_TCP;
1894 case ICE_PROFID_IPV6_UDP:
1895 tun_type = ICE_SW_IPV6_UDP;
1897 case ICE_PROFID_PPPOE_PAY:
1898 tun_type = ICE_SW_TUN_PPPOE_PAY;
1900 case ICE_PROFID_PPPOE_IPV4_TCP:
1901 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1903 case ICE_PROFID_PPPOE_IPV4_UDP:
1904 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1906 case ICE_PROFID_PPPOE_IPV4_OTHER:
1907 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1909 case ICE_PROFID_PPPOE_IPV6_TCP:
1910 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1912 case ICE_PROFID_PPPOE_IPV6_UDP:
1913 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1915 case ICE_PROFID_PPPOE_IPV6_OTHER:
1916 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1918 case ICE_PROFID_IPV4_ESP:
1919 tun_type = ICE_SW_TUN_IPV4_ESP;
1921 case ICE_PROFID_IPV6_ESP:
1922 tun_type = ICE_SW_TUN_IPV6_ESP;
1924 case ICE_PROFID_IPV4_AH:
1925 tun_type = ICE_SW_TUN_IPV4_AH;
1927 case ICE_PROFID_IPV6_AH:
1928 tun_type = ICE_SW_TUN_IPV6_AH;
1930 case ICE_PROFID_IPV4_NAT_T:
1931 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1933 case ICE_PROFID_IPV6_NAT_T:
1934 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1936 case ICE_PROFID_IPV4_PFCP_NODE:
1938 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1940 case ICE_PROFID_IPV6_PFCP_NODE:
1942 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1944 case ICE_PROFID_IPV4_PFCP_SESSION:
1946 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1948 case ICE_PROFID_IPV6_PFCP_SESSION:
1950 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1952 case ICE_PROFID_MAC_IPV4_L2TPV3:
1953 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1955 case ICE_PROFID_MAC_IPV6_L2TPV3:
1956 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1958 case ICE_PROFID_IPV4_GTPU_TEID:
1959 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1961 case ICE_PROFID_IPV6_GTPU_TEID:
1962 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1973 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1974 tun_type = ICE_SW_TUN_PPPOE_QINQ;
1975 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1976 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1977 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1978 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1979 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1980 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1981 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1982 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1983 else if (vlan && tun_type == ICE_NON_TUN)
1984 tun_type = ICE_NON_TUN_QINQ;
1990 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1991 * @hw: pointer to hardware structure
1992 * @recps: struct that we need to populate
1993 * @rid: recipe ID that we are populating
1994 * @refresh_required: true if we should get recipe to profile mapping from FW
1996 * This function is used to populate all the necessary entries into our
1997 * bookkeeping so that we have a current list of all the recipes that are
1998 * programmed in the firmware.
2000 static enum ice_status
2001 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2002 bool *refresh_required)
2004 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2005 struct ice_aqc_recipe_data_elem *tmp;
2006 u16 num_recps = ICE_MAX_NUM_RECIPES;
2007 struct ice_prot_lkup_ext *lkup_exts;
2008 enum ice_status status;
2013 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2015 /* we need a buffer big enough to accommodate all the recipes */
2016 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2017 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2019 return ICE_ERR_NO_MEMORY;
2021 tmp[0].recipe_indx = rid;
2022 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2023 /* non-zero status meaning recipe doesn't exist */
2027 /* Get recipe to profile map so that we can get the fv from lkups that
2028 * we read for a recipe from FW. Since we want to minimize the number of
2029 * times we make this FW call, just make one call and cache the copy
2030 * until a new recipe is added. This operation is only required the
2031 * first time to get the changes from FW. Then to search existing
2032 * entries we don't need to update the cache again until another recipe
2035 if (*refresh_required) {
2036 ice_get_recp_to_prof_map(hw);
2037 *refresh_required = false;
2040 /* Start populating all the entries for recps[rid] based on lkups from
2041 * firmware. Note that we are only creating the root recipe in our
2044 lkup_exts = &recps[rid].lkup_exts;
2046 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2047 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2048 struct ice_recp_grp_entry *rg_entry;
2049 u8 i, prof, idx, prot = 0;
2053 rg_entry = (struct ice_recp_grp_entry *)
2054 ice_malloc(hw, sizeof(*rg_entry));
2056 status = ICE_ERR_NO_MEMORY;
2060 idx = root_bufs.recipe_indx;
2061 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2063 /* Mark all result indices in this chain */
2064 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2065 ice_set_bit(root_bufs.content.result_indx &
2066 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2068 /* get the first profile that is associated with rid */
2069 prof = ice_find_first_bit(recipe_to_profile[idx],
2070 ICE_MAX_NUM_PROFILES);
2071 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2072 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2074 rg_entry->fv_idx[i] = lkup_indx;
2075 rg_entry->fv_mask[i] =
2076 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2078 /* If the recipe is a chained recipe then all its
2079 * child recipe's result will have a result index.
2080 * To fill fv_words we should not use those result
2081 * index, we only need the protocol ids and offsets.
2082 * We will skip all the fv_idx which stores result
2083 * index in them. We also need to skip any fv_idx which
2084 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2085 * valid offset value.
2087 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2088 rg_entry->fv_idx[i]) ||
2089 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2090 rg_entry->fv_idx[i] == 0)
2093 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2094 rg_entry->fv_idx[i], &prot, &off);
2095 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2096 lkup_exts->fv_words[fv_word_idx].off = off;
2097 lkup_exts->field_mask[fv_word_idx] =
2098 rg_entry->fv_mask[i];
2099 if (prot == ICE_META_DATA_ID_HW &&
2100 off == ICE_TUN_FLAG_MDID_OFF)
2104 /* populate rg_list with the data from the child entry of this
2107 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2109 /* Propagate some data to the recipe database */
2110 recps[idx].is_root = !!is_root;
2111 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2112 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2113 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2114 recps[idx].chain_idx = root_bufs.content.result_indx &
2115 ~ICE_AQ_RECIPE_RESULT_EN;
2116 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2118 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2124 /* Only do the following for root recipes entries */
2125 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2126 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2127 recps[idx].root_rid = root_bufs.content.rid &
2128 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2129 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2132 /* Complete initialization of the root recipe entry */
2133 lkup_exts->n_val_words = fv_word_idx;
2134 recps[rid].big_recp = (num_recps > 1);
2135 recps[rid].n_grp_count = (u8)num_recps;
2136 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2137 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2138 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2139 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2140 if (!recps[rid].root_buf)
2143 /* Copy result indexes */
2144 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2145 recps[rid].recp_created = true;
2153 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2154 * @hw: pointer to hardware structure
2156 * This function is used to populate recipe_to_profile matrix where index to
2157 * this array is the recipe ID and the element is the mapping of which profiles
2158 * is this recipe mapped to.
2160 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2162 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2165 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2168 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2169 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2170 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2172 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2173 ICE_MAX_NUM_RECIPES);
2174 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2175 ice_set_bit(i, recipe_to_profile[j]);
2180 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2181 * @hw: pointer to the HW struct
2182 * @recp_list: pointer to sw recipe list
2184 * Allocate memory for the entire recipe table and initialize the structures/
2185 * entries corresponding to basic recipes.
2188 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2190 struct ice_sw_recipe *recps;
2193 recps = (struct ice_sw_recipe *)
2194 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2196 return ICE_ERR_NO_MEMORY;
2198 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2199 recps[i].root_rid = i;
2200 INIT_LIST_HEAD(&recps[i].filt_rules);
2201 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2202 INIT_LIST_HEAD(&recps[i].rg_list);
2203 ice_init_lock(&recps[i].filt_rule_lock);
2212 * ice_aq_get_sw_cfg - get switch configuration
2213 * @hw: pointer to the hardware structure
2214 * @buf: pointer to the result buffer
2215 * @buf_size: length of the buffer available for response
2216 * @req_desc: pointer to requested descriptor
2217 * @num_elems: pointer to number of elements
2218 * @cd: pointer to command details structure or NULL
2220 * Get switch configuration (0x0200) to be placed in buf.
2221 * This admin command returns information such as initial VSI/port number
2222 * and switch ID it belongs to.
2224 * NOTE: *req_desc is both an input/output parameter.
2225 * The caller of this function first calls this function with *request_desc set
2226 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2227 * configuration information has been returned; if non-zero (meaning not all
2228 * the information was returned), the caller should call this function again
2229 * with *req_desc set to the previous value returned by f/w to get the
2230 * next block of switch configuration information.
2232 * *num_elems is output only parameter. This reflects the number of elements
2233 * in response buffer. The caller of this function to use *num_elems while
2234 * parsing the response buffer.
2236 static enum ice_status
2237 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2238 u16 buf_size, u16 *req_desc, u16 *num_elems,
2239 struct ice_sq_cd *cd)
2241 struct ice_aqc_get_sw_cfg *cmd;
2242 struct ice_aq_desc desc;
2243 enum ice_status status;
2245 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2246 cmd = &desc.params.get_sw_conf;
2247 cmd->element = CPU_TO_LE16(*req_desc);
2249 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2251 *req_desc = LE16_TO_CPU(cmd->element);
2252 *num_elems = LE16_TO_CPU(cmd->num_elems);
2259 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2260 * @hw: pointer to the HW struct
2261 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2262 * @global_lut_id: output parameter for the RSS global LUT's ID
2264 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2266 struct ice_aqc_alloc_free_res_elem *sw_buf;
2267 enum ice_status status;
2270 buf_len = ice_struct_size(sw_buf, elem, 1);
2271 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2273 return ICE_ERR_NO_MEMORY;
2275 sw_buf->num_elems = CPU_TO_LE16(1);
2276 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2277 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2278 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2280 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2282 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2283 shared_res ? "shared" : "dedicated", status);
2284 goto ice_alloc_global_lut_exit;
2287 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2289 ice_alloc_global_lut_exit:
2290 ice_free(hw, sw_buf);
2295 * ice_free_global_lut - free a RSS global LUT
2296 * @hw: pointer to the HW struct
2297 * @global_lut_id: ID of the RSS global LUT to free
2299 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2301 struct ice_aqc_alloc_free_res_elem *sw_buf;
2302 u16 buf_len, num_elems = 1;
2303 enum ice_status status;
2305 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2306 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2308 return ICE_ERR_NO_MEMORY;
2310 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2311 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2312 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2314 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2316 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2317 global_lut_id, status);
2319 ice_free(hw, sw_buf);
2324 * ice_alloc_sw - allocate resources specific to switch
2325 * @hw: pointer to the HW struct
2326 * @ena_stats: true to turn on VEB stats
2327 * @shared_res: true for shared resource, false for dedicated resource
2328 * @sw_id: switch ID returned
2329 * @counter_id: VEB counter ID returned
2331 * allocates switch resources (SWID and VEB counter) (0x0208)
2334 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2337 struct ice_aqc_alloc_free_res_elem *sw_buf;
2338 struct ice_aqc_res_elem *sw_ele;
2339 enum ice_status status;
2342 buf_len = ice_struct_size(sw_buf, elem, 1);
2343 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2345 return ICE_ERR_NO_MEMORY;
2347 /* Prepare buffer for switch ID.
2348 * The number of resource entries in buffer is passed as 1 since only a
2349 * single switch/VEB instance is allocated, and hence a single sw_id
2352 sw_buf->num_elems = CPU_TO_LE16(1);
2354 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2355 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2356 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2358 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2359 ice_aqc_opc_alloc_res, NULL);
2362 goto ice_alloc_sw_exit;
2364 sw_ele = &sw_buf->elem[0];
2365 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2368 /* Prepare buffer for VEB Counter */
2369 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2370 struct ice_aqc_alloc_free_res_elem *counter_buf;
2371 struct ice_aqc_res_elem *counter_ele;
2373 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2374 ice_malloc(hw, buf_len);
2376 status = ICE_ERR_NO_MEMORY;
2377 goto ice_alloc_sw_exit;
2380 /* The number of resource entries in buffer is passed as 1 since
2381 * only a single switch/VEB instance is allocated, and hence a
2382 * single VEB counter is requested.
2384 counter_buf->num_elems = CPU_TO_LE16(1);
2385 counter_buf->res_type =
2386 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2387 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2388 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2392 ice_free(hw, counter_buf);
2393 goto ice_alloc_sw_exit;
2395 counter_ele = &counter_buf->elem[0];
2396 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2397 ice_free(hw, counter_buf);
2401 ice_free(hw, sw_buf);
2406 * ice_free_sw - free resources specific to switch
2407 * @hw: pointer to the HW struct
2408 * @sw_id: switch ID returned
2409 * @counter_id: VEB counter ID returned
2411 * free switch resources (SWID and VEB counter) (0x0209)
2413 * NOTE: This function frees multiple resources. It continues
2414 * releasing other resources even after it encounters error.
2415 * The error code returned is the last error it encountered.
2417 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2419 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2420 enum ice_status status, ret_status;
2423 buf_len = ice_struct_size(sw_buf, elem, 1);
2424 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2426 return ICE_ERR_NO_MEMORY;
2428 /* Prepare buffer to free for switch ID res.
2429 * The number of resource entries in buffer is passed as 1 since only a
2430 * single switch/VEB instance is freed, and hence a single sw_id
2433 sw_buf->num_elems = CPU_TO_LE16(1);
2434 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2435 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2437 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2438 ice_aqc_opc_free_res, NULL);
2441 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2443 /* Prepare buffer to free for VEB Counter resource */
2444 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2445 ice_malloc(hw, buf_len);
2447 ice_free(hw, sw_buf);
2448 return ICE_ERR_NO_MEMORY;
2451 /* The number of resource entries in buffer is passed as 1 since only a
2452 * single switch/VEB instance is freed, and hence a single VEB counter
2455 counter_buf->num_elems = CPU_TO_LE16(1);
2456 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2457 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2459 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2460 ice_aqc_opc_free_res, NULL);
2462 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2463 ret_status = status;
2466 ice_free(hw, counter_buf);
2467 ice_free(hw, sw_buf);
2473 * @hw: pointer to the HW struct
2474 * @vsi_ctx: pointer to a VSI context struct
2475 * @cd: pointer to command details structure or NULL
2477 * Add a VSI context to the hardware (0x0210)
2480 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2481 struct ice_sq_cd *cd)
2483 struct ice_aqc_add_update_free_vsi_resp *res;
2484 struct ice_aqc_add_get_update_free_vsi *cmd;
2485 struct ice_aq_desc desc;
2486 enum ice_status status;
2488 cmd = &desc.params.vsi_cmd;
2489 res = &desc.params.add_update_free_vsi_res;
2491 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2493 if (!vsi_ctx->alloc_from_pool)
2494 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2495 ICE_AQ_VSI_IS_VALID);
2497 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2499 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2501 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2502 sizeof(vsi_ctx->info), cd);
2505 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2506 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2507 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2515 * @hw: pointer to the HW struct
2516 * @vsi_ctx: pointer to a VSI context struct
2517 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2518 * @cd: pointer to command details structure or NULL
2520 * Free VSI context info from hardware (0x0213)
2523 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2524 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2526 struct ice_aqc_add_update_free_vsi_resp *resp;
2527 struct ice_aqc_add_get_update_free_vsi *cmd;
2528 struct ice_aq_desc desc;
2529 enum ice_status status;
2531 cmd = &desc.params.vsi_cmd;
2532 resp = &desc.params.add_update_free_vsi_res;
2534 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2536 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2538 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2540 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2542 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2543 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2551 * @hw: pointer to the HW struct
2552 * @vsi_ctx: pointer to a VSI context struct
2553 * @cd: pointer to command details structure or NULL
2555 * Update VSI context in the hardware (0x0211)
2558 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2559 struct ice_sq_cd *cd)
2561 struct ice_aqc_add_update_free_vsi_resp *resp;
2562 struct ice_aqc_add_get_update_free_vsi *cmd;
2563 struct ice_aq_desc desc;
2564 enum ice_status status;
2566 cmd = &desc.params.vsi_cmd;
2567 resp = &desc.params.add_update_free_vsi_res;
2569 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2571 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2573 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2575 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2576 sizeof(vsi_ctx->info), cd);
2579 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2580 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2587 * ice_is_vsi_valid - check whether the VSI is valid or not
2588 * @hw: pointer to the HW struct
2589 * @vsi_handle: VSI handle
2591 * check whether the VSI is valid or not
2593 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2595 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2599 * ice_get_hw_vsi_num - return the HW VSI number
2600 * @hw: pointer to the HW struct
2601 * @vsi_handle: VSI handle
2603 * return the HW VSI number
2604 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2606 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2608 return hw->vsi_ctx[vsi_handle]->vsi_num;
2612 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2613 * @hw: pointer to the HW struct
2614 * @vsi_handle: VSI handle
2616 * return the VSI context entry for a given VSI handle
2618 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2620 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2624 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2625 * @hw: pointer to the HW struct
2626 * @vsi_handle: VSI handle
2627 * @vsi: VSI context pointer
2629 * save the VSI context entry for a given VSI handle
2632 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2634 hw->vsi_ctx[vsi_handle] = vsi;
2638 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2639 * @hw: pointer to the HW struct
2640 * @vsi_handle: VSI handle
2642 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2644 struct ice_vsi_ctx *vsi;
2647 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2650 ice_for_each_traffic_class(i) {
2651 if (vsi->lan_q_ctx[i]) {
2652 ice_free(hw, vsi->lan_q_ctx[i]);
2653 vsi->lan_q_ctx[i] = NULL;
2659 * ice_clear_vsi_ctx - clear the VSI context entry
2660 * @hw: pointer to the HW struct
2661 * @vsi_handle: VSI handle
2663 * clear the VSI context entry
2665 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2667 struct ice_vsi_ctx *vsi;
2669 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2671 ice_clear_vsi_q_ctx(hw, vsi_handle);
2673 hw->vsi_ctx[vsi_handle] = NULL;
2678 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2679 * @hw: pointer to the HW struct
2681 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2685 for (i = 0; i < ICE_MAX_VSI; i++)
2686 ice_clear_vsi_ctx(hw, i);
2690 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2691 * @hw: pointer to the HW struct
2692 * @vsi_handle: unique VSI handle provided by drivers
2693 * @vsi_ctx: pointer to a VSI context struct
2694 * @cd: pointer to command details structure or NULL
2696 * Add a VSI context to the hardware also add it into the VSI handle list.
2697 * If this function gets called after reset for existing VSIs then update
2698 * with the new HW VSI number in the corresponding VSI handle list entry.
2701 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2702 struct ice_sq_cd *cd)
2704 struct ice_vsi_ctx *tmp_vsi_ctx;
2705 enum ice_status status;
2707 if (vsi_handle >= ICE_MAX_VSI)
2708 return ICE_ERR_PARAM;
2709 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2712 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2714 /* Create a new VSI context */
2715 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2716 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2718 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2719 return ICE_ERR_NO_MEMORY;
2721 *tmp_vsi_ctx = *vsi_ctx;
2723 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2725 /* update with new HW VSI num */
2726 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2733 * ice_free_vsi- free VSI context from hardware and VSI handle list
2734 * @hw: pointer to the HW struct
2735 * @vsi_handle: unique VSI handle
2736 * @vsi_ctx: pointer to a VSI context struct
2737 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2738 * @cd: pointer to command details structure or NULL
2740 * Free VSI context info from hardware as well as from VSI handle list
2743 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2744 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2746 enum ice_status status;
2748 if (!ice_is_vsi_valid(hw, vsi_handle))
2749 return ICE_ERR_PARAM;
2750 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2751 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2753 ice_clear_vsi_ctx(hw, vsi_handle);
2759 * @hw: pointer to the HW struct
2760 * @vsi_handle: unique VSI handle
2761 * @vsi_ctx: pointer to a VSI context struct
2762 * @cd: pointer to command details structure or NULL
2764 * Update VSI context in the hardware
2767 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2768 struct ice_sq_cd *cd)
2770 if (!ice_is_vsi_valid(hw, vsi_handle))
2771 return ICE_ERR_PARAM;
2772 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2773 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2777 * ice_aq_get_vsi_params
2778 * @hw: pointer to the HW struct
2779 * @vsi_ctx: pointer to a VSI context struct
2780 * @cd: pointer to command details structure or NULL
2782 * Get VSI context info from hardware (0x0212)
2785 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2786 struct ice_sq_cd *cd)
2788 struct ice_aqc_add_get_update_free_vsi *cmd;
2789 struct ice_aqc_get_vsi_resp *resp;
2790 struct ice_aq_desc desc;
2791 enum ice_status status;
2793 cmd = &desc.params.vsi_cmd;
2794 resp = &desc.params.get_vsi_resp;
2796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2798 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2800 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2801 sizeof(vsi_ctx->info), cd);
2803 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2805 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2806 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2813 * ice_aq_add_update_mir_rule - add/update a mirror rule
2814 * @hw: pointer to the HW struct
2815 * @rule_type: Rule Type
2816 * @dest_vsi: VSI number to which packets will be mirrored
2817 * @count: length of the list
2818 * @mr_buf: buffer for list of mirrored VSI numbers
2819 * @cd: pointer to command details structure or NULL
2822 * Add/Update Mirror Rule (0x260).
2825 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2826 u16 count, struct ice_mir_rule_buf *mr_buf,
2827 struct ice_sq_cd *cd, u16 *rule_id)
2829 struct ice_aqc_add_update_mir_rule *cmd;
2830 struct ice_aq_desc desc;
2831 enum ice_status status;
2832 __le16 *mr_list = NULL;
2835 switch (rule_type) {
2836 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2837 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2838 /* Make sure count and mr_buf are set for these rule_types */
2839 if (!(count && mr_buf))
2840 return ICE_ERR_PARAM;
2842 buf_size = count * sizeof(__le16);
2843 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2845 return ICE_ERR_NO_MEMORY;
2847 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2848 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2849 /* Make sure count and mr_buf are not set for these
2852 if (count || mr_buf)
2853 return ICE_ERR_PARAM;
2856 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2857 return ICE_ERR_OUT_OF_RANGE;
2860 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2862 /* Pre-process 'mr_buf' items for add/update of virtual port
2863 * ingress/egress mirroring (but not physical port ingress/egress
2869 for (i = 0; i < count; i++) {
2872 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2874 /* Validate specified VSI number, make sure it is less
2875 * than ICE_MAX_VSI, if not return with error.
2877 if (id >= ICE_MAX_VSI) {
2878 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2880 ice_free(hw, mr_list);
2881 return ICE_ERR_OUT_OF_RANGE;
2884 /* add VSI to mirror rule */
2887 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2888 else /* remove VSI from mirror rule */
2889 mr_list[i] = CPU_TO_LE16(id);
2893 cmd = &desc.params.add_update_rule;
2894 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2895 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2896 ICE_AQC_RULE_ID_VALID_M);
2897 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2898 cmd->num_entries = CPU_TO_LE16(count);
2899 cmd->dest = CPU_TO_LE16(dest_vsi);
2901 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2903 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2905 ice_free(hw, mr_list);
2911 * ice_aq_delete_mir_rule - delete a mirror rule
2912 * @hw: pointer to the HW struct
2913 * @rule_id: Mirror rule ID (to be deleted)
2914 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2915 * otherwise it is returned to the shared pool
2916 * @cd: pointer to command details structure or NULL
2918 * Delete Mirror Rule (0x261).
2921 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2922 struct ice_sq_cd *cd)
2924 struct ice_aqc_delete_mir_rule *cmd;
2925 struct ice_aq_desc desc;
2927 /* rule_id should be in the range 0...63 */
2928 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2929 return ICE_ERR_OUT_OF_RANGE;
2931 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2933 cmd = &desc.params.del_rule;
2934 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2935 cmd->rule_id = CPU_TO_LE16(rule_id);
2938 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2940 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2944 * ice_aq_alloc_free_vsi_list
2945 * @hw: pointer to the HW struct
2946 * @vsi_list_id: VSI list ID returned or used for lookup
2947 * @lkup_type: switch rule filter lookup type
2948 * @opc: switch rules population command type - pass in the command opcode
2950 * allocates or free a VSI list resource
2952 static enum ice_status
2953 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2954 enum ice_sw_lkup_type lkup_type,
2955 enum ice_adminq_opc opc)
2957 struct ice_aqc_alloc_free_res_elem *sw_buf;
2958 struct ice_aqc_res_elem *vsi_ele;
2959 enum ice_status status;
2962 buf_len = ice_struct_size(sw_buf, elem, 1);
2963 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2965 return ICE_ERR_NO_MEMORY;
2966 sw_buf->num_elems = CPU_TO_LE16(1);
2968 if (lkup_type == ICE_SW_LKUP_MAC ||
2969 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2970 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2971 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2972 lkup_type == ICE_SW_LKUP_PROMISC ||
2973 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2974 lkup_type == ICE_SW_LKUP_LAST) {
2975 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2976 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2978 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2980 status = ICE_ERR_PARAM;
2981 goto ice_aq_alloc_free_vsi_list_exit;
2984 if (opc == ice_aqc_opc_free_res)
2985 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2987 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2989 goto ice_aq_alloc_free_vsi_list_exit;
2991 if (opc == ice_aqc_opc_alloc_res) {
2992 vsi_ele = &sw_buf->elem[0];
2993 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2996 ice_aq_alloc_free_vsi_list_exit:
2997 ice_free(hw, sw_buf);
3002 * ice_aq_set_storm_ctrl - Sets storm control configuration
3003 * @hw: pointer to the HW struct
3004 * @bcast_thresh: represents the upper threshold for broadcast storm control
3005 * @mcast_thresh: represents the upper threshold for multicast storm control
3006 * @ctl_bitmask: storm control knobs
3008 * Sets the storm control configuration (0x0280)
3011 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3014 struct ice_aqc_storm_cfg *cmd;
3015 struct ice_aq_desc desc;
3017 cmd = &desc.params.storm_conf;
3019 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3021 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3022 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3023 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3025 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3029 * ice_aq_get_storm_ctrl - gets storm control configuration
3030 * @hw: pointer to the HW struct
3031 * @bcast_thresh: represents the upper threshold for broadcast storm control
3032 * @mcast_thresh: represents the upper threshold for multicast storm control
3033 * @ctl_bitmask: storm control knobs
3035 * Gets the storm control configuration (0x0281)
3038 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3041 enum ice_status status;
3042 struct ice_aq_desc desc;
3044 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3046 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3048 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3051 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3054 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3057 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3064 * ice_aq_sw_rules - add/update/remove switch rules
3065 * @hw: pointer to the HW struct
3066 * @rule_list: pointer to switch rule population list
3067 * @rule_list_sz: total size of the rule list in bytes
3068 * @num_rules: number of switch rules in the rule_list
3069 * @opc: switch rules population command type - pass in the command opcode
3070 * @cd: pointer to command details structure or NULL
3072 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3074 static enum ice_status
3075 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3076 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3078 struct ice_aq_desc desc;
3079 enum ice_status status;
3081 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3083 if (opc != ice_aqc_opc_add_sw_rules &&
3084 opc != ice_aqc_opc_update_sw_rules &&
3085 opc != ice_aqc_opc_remove_sw_rules)
3086 return ICE_ERR_PARAM;
3088 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3090 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3091 desc.params.sw_rules.num_rules_fltr_entry_index =
3092 CPU_TO_LE16(num_rules);
3093 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3094 if (opc != ice_aqc_opc_add_sw_rules &&
3095 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3096 status = ICE_ERR_DOES_NOT_EXIST;
3102 * ice_aq_add_recipe - add switch recipe
3103 * @hw: pointer to the HW struct
3104 * @s_recipe_list: pointer to switch rule population list
3105 * @num_recipes: number of switch recipes in the list
3106 * @cd: pointer to command details structure or NULL
3111 ice_aq_add_recipe(struct ice_hw *hw,
3112 struct ice_aqc_recipe_data_elem *s_recipe_list,
3113 u16 num_recipes, struct ice_sq_cd *cd)
3115 struct ice_aqc_add_get_recipe *cmd;
3116 struct ice_aq_desc desc;
3119 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3120 cmd = &desc.params.add_get_recipe;
3121 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3123 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3124 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3126 buf_size = num_recipes * sizeof(*s_recipe_list);
3128 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3132 * ice_aq_get_recipe - get switch recipe
3133 * @hw: pointer to the HW struct
3134 * @s_recipe_list: pointer to switch rule population list
3135 * @num_recipes: pointer to the number of recipes (input and output)
3136 * @recipe_root: root recipe number of recipe(s) to retrieve
3137 * @cd: pointer to command details structure or NULL
3141 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3142 * On output, *num_recipes will equal the number of entries returned in
3145 * The caller must supply enough space in s_recipe_list to hold all possible
3146 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3149 ice_aq_get_recipe(struct ice_hw *hw,
3150 struct ice_aqc_recipe_data_elem *s_recipe_list,
3151 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3153 struct ice_aqc_add_get_recipe *cmd;
3154 struct ice_aq_desc desc;
3155 enum ice_status status;
3158 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3159 return ICE_ERR_PARAM;
3161 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3162 cmd = &desc.params.add_get_recipe;
3163 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3165 cmd->return_index = CPU_TO_LE16(recipe_root);
3166 cmd->num_sub_recipes = 0;
3168 buf_size = *num_recipes * sizeof(*s_recipe_list);
3170 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3171 /* cppcheck-suppress constArgument */
3172 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3178 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3179 * @hw: pointer to the HW struct
3180 * @profile_id: package profile ID to associate the recipe with
3181 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3182 * @cd: pointer to command details structure or NULL
3183 * Recipe to profile association (0x0291)
3186 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3187 struct ice_sq_cd *cd)
3189 struct ice_aqc_recipe_to_profile *cmd;
3190 struct ice_aq_desc desc;
3192 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3193 cmd = &desc.params.recipe_to_profile;
3194 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3195 cmd->profile_id = CPU_TO_LE16(profile_id);
3196 /* Set the recipe ID bit in the bitmask to let the device know which
3197 * profile we are associating the recipe to
3199 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3200 ICE_NONDMA_TO_NONDMA);
3202 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3206 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3207 * @hw: pointer to the HW struct
3208 * @profile_id: package profile ID to associate the recipe with
3209 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3210 * @cd: pointer to command details structure or NULL
3211 * Associate profile ID with given recipe (0x0293)
3214 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3215 struct ice_sq_cd *cd)
3217 struct ice_aqc_recipe_to_profile *cmd;
3218 struct ice_aq_desc desc;
3219 enum ice_status status;
3221 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3222 cmd = &desc.params.recipe_to_profile;
3223 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3224 cmd->profile_id = CPU_TO_LE16(profile_id);
3226 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3228 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3229 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3235 * ice_alloc_recipe - add recipe resource
3236 * @hw: pointer to the hardware structure
3237 * @rid: recipe ID returned as response to AQ call
3239 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3241 struct ice_aqc_alloc_free_res_elem *sw_buf;
3242 enum ice_status status;
3245 buf_len = ice_struct_size(sw_buf, elem, 1);
3246 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3248 return ICE_ERR_NO_MEMORY;
3250 sw_buf->num_elems = CPU_TO_LE16(1);
3251 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3252 ICE_AQC_RES_TYPE_S) |
3253 ICE_AQC_RES_TYPE_FLAG_SHARED);
3254 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3255 ice_aqc_opc_alloc_res, NULL);
3257 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3258 ice_free(hw, sw_buf);
3263 /* ice_init_port_info - Initialize port_info with switch configuration data
3264 * @pi: pointer to port_info
3265 * @vsi_port_num: VSI number or port number
3266 * @type: Type of switch element (port or VSI)
3267 * @swid: switch ID of the switch the element is attached to
3268 * @pf_vf_num: PF or VF number
3269 * @is_vf: true if the element is a VF, false otherwise
3272 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3273 u16 swid, u16 pf_vf_num, bool is_vf)
3276 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3277 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3279 pi->pf_vf_num = pf_vf_num;
3281 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3282 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3285 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3290 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3291 * @hw: pointer to the hardware structure
3293 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3295 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3296 enum ice_status status;
3303 num_total_ports = 1;
3305 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3306 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3309 return ICE_ERR_NO_MEMORY;
3311 /* Multiple calls to ice_aq_get_sw_cfg may be required
3312 * to get all the switch configuration information. The need
3313 * for additional calls is indicated by ice_aq_get_sw_cfg
3314 * writing a non-zero value in req_desc
3317 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3319 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3320 &req_desc, &num_elems, NULL);
3325 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3326 u16 pf_vf_num, swid, vsi_port_num;
3330 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3331 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3333 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3334 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3336 swid = LE16_TO_CPU(ele->swid);
3338 if (LE16_TO_CPU(ele->pf_vf_num) &
3339 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3342 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3343 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3346 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3347 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3348 if (j == num_total_ports) {
3349 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3350 status = ICE_ERR_CFG;
3353 ice_init_port_info(hw->port_info,
3354 vsi_port_num, res_type, swid,
3362 } while (req_desc && !status);
3370 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3371 * @hw: pointer to the hardware structure
3372 * @fi: filter info structure to fill/update
3374 * This helper function populates the lb_en and lan_en elements of the provided
3375 * ice_fltr_info struct using the switch's type and characteristics of the
3376 * switch rule being configured.
3378 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3380 if ((fi->flag & ICE_FLTR_RX) &&
3381 (fi->fltr_act == ICE_FWD_TO_VSI ||
3382 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3383 fi->lkup_type == ICE_SW_LKUP_LAST)
3387 if ((fi->flag & ICE_FLTR_TX) &&
3388 (fi->fltr_act == ICE_FWD_TO_VSI ||
3389 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3390 fi->fltr_act == ICE_FWD_TO_Q ||
3391 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3392 /* Setting LB for prune actions will result in replicated
3393 * packets to the internal switch that will be dropped.
3395 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3398 /* Set lan_en to TRUE if
3399 * 1. The switch is a VEB AND
3401 * 2.1 The lookup is a directional lookup like ethertype,
3402 * promiscuous, ethertype-MAC, promiscuous-VLAN
3403 * and default-port OR
3404 * 2.2 The lookup is VLAN, OR
3405 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3406 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3410 * The switch is a VEPA.
3412 * In all other cases, the LAN enable has to be set to false.
3415 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3416 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3417 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3418 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3419 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3420 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3421 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3422 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3423 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3424 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3433 * ice_fill_sw_rule - Helper function to fill switch rule structure
3434 * @hw: pointer to the hardware structure
3435 * @f_info: entry containing packet forwarding information
3436 * @s_rule: switch rule structure to be filled in based on mac_entry
3437 * @opc: switch rules population command type - pass in the command opcode
3440 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3441 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3443 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3451 if (opc == ice_aqc_opc_remove_sw_rules) {
3452 s_rule->pdata.lkup_tx_rx.act = 0;
3453 s_rule->pdata.lkup_tx_rx.index =
3454 CPU_TO_LE16(f_info->fltr_rule_id);
3455 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3459 eth_hdr_sz = sizeof(dummy_eth_header);
3460 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3462 /* initialize the ether header with a dummy header */
3463 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3464 ice_fill_sw_info(hw, f_info);
3466 switch (f_info->fltr_act) {
3467 case ICE_FWD_TO_VSI:
3468 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3469 ICE_SINGLE_ACT_VSI_ID_M;
3470 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3471 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3472 ICE_SINGLE_ACT_VALID_BIT;
3474 case ICE_FWD_TO_VSI_LIST:
3475 act |= ICE_SINGLE_ACT_VSI_LIST;
3476 act |= (f_info->fwd_id.vsi_list_id <<
3477 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3478 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3479 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3480 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3481 ICE_SINGLE_ACT_VALID_BIT;
3484 act |= ICE_SINGLE_ACT_TO_Q;
3485 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3486 ICE_SINGLE_ACT_Q_INDEX_M;
3488 case ICE_DROP_PACKET:
3489 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3490 ICE_SINGLE_ACT_VALID_BIT;
3492 case ICE_FWD_TO_QGRP:
3493 q_rgn = f_info->qgrp_size > 0 ?
3494 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3495 act |= ICE_SINGLE_ACT_TO_Q;
3496 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3497 ICE_SINGLE_ACT_Q_INDEX_M;
3498 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3499 ICE_SINGLE_ACT_Q_REGION_M;
3506 act |= ICE_SINGLE_ACT_LB_ENABLE;
3508 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3510 switch (f_info->lkup_type) {
3511 case ICE_SW_LKUP_MAC:
3512 daddr = f_info->l_data.mac.mac_addr;
3514 case ICE_SW_LKUP_VLAN:
3515 vlan_id = f_info->l_data.vlan.vlan_id;
3516 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3517 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3518 act |= ICE_SINGLE_ACT_PRUNE;
3519 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3522 case ICE_SW_LKUP_ETHERTYPE_MAC:
3523 daddr = f_info->l_data.ethertype_mac.mac_addr;
3525 case ICE_SW_LKUP_ETHERTYPE:
3526 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3527 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3529 case ICE_SW_LKUP_MAC_VLAN:
3530 daddr = f_info->l_data.mac_vlan.mac_addr;
3531 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3533 case ICE_SW_LKUP_PROMISC_VLAN:
3534 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3536 case ICE_SW_LKUP_PROMISC:
3537 daddr = f_info->l_data.mac_vlan.mac_addr;
3543 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3544 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3545 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3547 /* Recipe set depending on lookup type */
3548 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3549 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3550 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3553 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3554 ICE_NONDMA_TO_NONDMA);
3556 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3557 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3558 *off = CPU_TO_BE16(vlan_id);
3561 /* Create the switch rule with the final dummy Ethernet header */
3562 if (opc != ice_aqc_opc_update_sw_rules)
3563 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3567 * ice_add_marker_act
3568 * @hw: pointer to the hardware structure
3569 * @m_ent: the management entry for which sw marker needs to be added
3570 * @sw_marker: sw marker to tag the Rx descriptor with
3571 * @l_id: large action resource ID
3573 * Create a large action to hold software marker and update the switch rule
3574 * entry pointed by m_ent with newly created large action
3576 static enum ice_status
3577 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3578 u16 sw_marker, u16 l_id)
3580 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3581 /* For software marker we need 3 large actions
3582 * 1. FWD action: FWD TO VSI or VSI LIST
3583 * 2. GENERIC VALUE action to hold the profile ID
3584 * 3. GENERIC VALUE action to hold the software marker ID
3586 const u16 num_lg_acts = 3;
3587 enum ice_status status;
3593 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3594 return ICE_ERR_PARAM;
3596 /* Create two back-to-back switch rules and submit them to the HW using
3597 * one memory buffer:
3601 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3602 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3603 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3605 return ICE_ERR_NO_MEMORY;
3607 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3609 /* Fill in the first switch rule i.e. large action */
3610 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3611 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3612 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3614 /* First action VSI forwarding or VSI list forwarding depending on how
3617 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3618 m_ent->fltr_info.fwd_id.hw_vsi_id;
3620 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3621 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3622 if (m_ent->vsi_count > 1)
3623 act |= ICE_LG_ACT_VSI_LIST;
3624 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3626 /* Second action descriptor type */
3627 act = ICE_LG_ACT_GENERIC;
3629 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3630 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3632 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3633 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3635 /* Third action Marker value */
3636 act |= ICE_LG_ACT_GENERIC;
3637 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3638 ICE_LG_ACT_GENERIC_VALUE_M;
3640 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3642 /* call the fill switch rule to fill the lookup Tx Rx structure */
3643 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3644 ice_aqc_opc_update_sw_rules);
3646 /* Update the action to point to the large action ID */
3647 rx_tx->pdata.lkup_tx_rx.act =
3648 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3649 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3650 ICE_SINGLE_ACT_PTR_VAL_M));
3652 /* Use the filter rule ID of the previously created rule with single
3653 * act. Once the update happens, hardware will treat this as large
3656 rx_tx->pdata.lkup_tx_rx.index =
3657 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3659 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3660 ice_aqc_opc_update_sw_rules, NULL);
3662 m_ent->lg_act_idx = l_id;
3663 m_ent->sw_marker_id = sw_marker;
3666 ice_free(hw, lg_act);
3671 * ice_add_counter_act - add/update filter rule with counter action
3672 * @hw: pointer to the hardware structure
3673 * @m_ent: the management entry for which counter needs to be added
3674 * @counter_id: VLAN counter ID returned as part of allocate resource
3675 * @l_id: large action resource ID
3677 static enum ice_status
3678 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3679 u16 counter_id, u16 l_id)
3681 struct ice_aqc_sw_rules_elem *lg_act;
3682 struct ice_aqc_sw_rules_elem *rx_tx;
3683 enum ice_status status;
3684 /* 2 actions will be added while adding a large action counter */
3685 const int num_acts = 2;
3692 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3693 return ICE_ERR_PARAM;
3695 /* Create two back-to-back switch rules and submit them to the HW using
3696 * one memory buffer:
3700 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3701 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3702 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3704 return ICE_ERR_NO_MEMORY;
3706 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3708 /* Fill in the first switch rule i.e. large action */
3709 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3710 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3711 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3713 /* First action VSI forwarding or VSI list forwarding depending on how
3716 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3717 m_ent->fltr_info.fwd_id.hw_vsi_id;
3719 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3720 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3721 ICE_LG_ACT_VSI_LIST_ID_M;
3722 if (m_ent->vsi_count > 1)
3723 act |= ICE_LG_ACT_VSI_LIST;
3724 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3726 /* Second action counter ID */
3727 act = ICE_LG_ACT_STAT_COUNT;
3728 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3729 ICE_LG_ACT_STAT_COUNT_M;
3730 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3732 /* call the fill switch rule to fill the lookup Tx Rx structure */
3733 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3734 ice_aqc_opc_update_sw_rules);
3736 act = ICE_SINGLE_ACT_PTR;
3737 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3738 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3740 /* Use the filter rule ID of the previously created rule with single
3741 * act. Once the update happens, hardware will treat this as large
3744 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3745 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3747 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3748 ice_aqc_opc_update_sw_rules, NULL);
3750 m_ent->lg_act_idx = l_id;
3751 m_ent->counter_index = counter_id;
3754 ice_free(hw, lg_act);
3759 * ice_create_vsi_list_map
3760 * @hw: pointer to the hardware structure
3761 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3762 * @num_vsi: number of VSI handles in the array
3763 * @vsi_list_id: VSI list ID generated as part of allocate resource
3765 * Helper function to create a new entry of VSI list ID to VSI mapping
3766 * using the given VSI list ID
3768 static struct ice_vsi_list_map_info *
3769 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3772 struct ice_switch_info *sw = hw->switch_info;
3773 struct ice_vsi_list_map_info *v_map;
3776 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3780 v_map->vsi_list_id = vsi_list_id;
3782 for (i = 0; i < num_vsi; i++)
3783 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3785 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3790 * ice_update_vsi_list_rule
3791 * @hw: pointer to the hardware structure
3792 * @vsi_handle_arr: array of VSI handles to form a VSI list
3793 * @num_vsi: number of VSI handles in the array
3794 * @vsi_list_id: VSI list ID generated as part of allocate resource
3795 * @remove: Boolean value to indicate if this is a remove action
3796 * @opc: switch rules population command type - pass in the command opcode
3797 * @lkup_type: lookup type of the filter
3799 * Call AQ command to add a new switch rule or update existing switch rule
3800 * using the given VSI list ID
3802 static enum ice_status
3803 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3804 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3805 enum ice_sw_lkup_type lkup_type)
3807 struct ice_aqc_sw_rules_elem *s_rule;
3808 enum ice_status status;
3814 return ICE_ERR_PARAM;
3816 if (lkup_type == ICE_SW_LKUP_MAC ||
3817 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3818 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3819 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3820 lkup_type == ICE_SW_LKUP_PROMISC ||
3821 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3822 lkup_type == ICE_SW_LKUP_LAST)
3823 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3824 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3825 else if (lkup_type == ICE_SW_LKUP_VLAN)
3826 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3827 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3829 return ICE_ERR_PARAM;
3831 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3832 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3834 return ICE_ERR_NO_MEMORY;
3835 for (i = 0; i < num_vsi; i++) {
3836 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3837 status = ICE_ERR_PARAM;
3840 /* AQ call requires hw_vsi_id(s) */
3841 s_rule->pdata.vsi_list.vsi[i] =
3842 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3845 s_rule->type = CPU_TO_LE16(rule_type);
3846 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3847 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3849 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3852 ice_free(hw, s_rule);
3857 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3858 * @hw: pointer to the HW struct
3859 * @vsi_handle_arr: array of VSI handles to form a VSI list
3860 * @num_vsi: number of VSI handles in the array
3861 * @vsi_list_id: stores the ID of the VSI list to be created
3862 * @lkup_type: switch rule filter's lookup type
3864 static enum ice_status
3865 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3866 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3868 enum ice_status status;
3870 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3871 ice_aqc_opc_alloc_res);
3875 /* Update the newly created VSI list to include the specified VSIs */
3876 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3877 *vsi_list_id, false,
3878 ice_aqc_opc_add_sw_rules, lkup_type);
3882 * ice_create_pkt_fwd_rule
3883 * @hw: pointer to the hardware structure
3884 * @recp_list: corresponding filter management list
3885 * @f_entry: entry containing packet forwarding information
3887 * Create switch rule with given filter information and add an entry
3888 * to the corresponding filter management list to track this switch rule
3891 static enum ice_status
3892 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3893 struct ice_fltr_list_entry *f_entry)
3895 struct ice_fltr_mgmt_list_entry *fm_entry;
3896 struct ice_aqc_sw_rules_elem *s_rule;
3897 enum ice_status status;
3899 s_rule = (struct ice_aqc_sw_rules_elem *)
3900 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3902 return ICE_ERR_NO_MEMORY;
3903 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3904 ice_malloc(hw, sizeof(*fm_entry));
3906 status = ICE_ERR_NO_MEMORY;
3907 goto ice_create_pkt_fwd_rule_exit;
3910 fm_entry->fltr_info = f_entry->fltr_info;
3912 /* Initialize all the fields for the management entry */
3913 fm_entry->vsi_count = 1;
3914 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3915 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3916 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3918 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3919 ice_aqc_opc_add_sw_rules);
3921 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3922 ice_aqc_opc_add_sw_rules, NULL);
3924 ice_free(hw, fm_entry);
3925 goto ice_create_pkt_fwd_rule_exit;
3928 f_entry->fltr_info.fltr_rule_id =
3929 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3930 fm_entry->fltr_info.fltr_rule_id =
3931 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3933 /* The book keeping entries will get removed when base driver
3934 * calls remove filter AQ command
3936 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3938 ice_create_pkt_fwd_rule_exit:
3939 ice_free(hw, s_rule);
3944 * ice_update_pkt_fwd_rule
3945 * @hw: pointer to the hardware structure
3946 * @f_info: filter information for switch rule
3948 * Call AQ command to update a previously created switch rule with a
3951 static enum ice_status
3952 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3954 struct ice_aqc_sw_rules_elem *s_rule;
3955 enum ice_status status;
3957 s_rule = (struct ice_aqc_sw_rules_elem *)
3958 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3960 return ICE_ERR_NO_MEMORY;
3962 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3964 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3966 /* Update switch rule with new rule set to forward VSI list */
3967 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3968 ice_aqc_opc_update_sw_rules, NULL);
3970 ice_free(hw, s_rule);
3975 * ice_update_sw_rule_bridge_mode
3976 * @hw: pointer to the HW struct
3978 * Updates unicast switch filter rules based on VEB/VEPA mode
3980 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3982 struct ice_switch_info *sw = hw->switch_info;
3983 struct ice_fltr_mgmt_list_entry *fm_entry;
3984 enum ice_status status = ICE_SUCCESS;
3985 struct LIST_HEAD_TYPE *rule_head;
3986 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3988 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3989 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3991 ice_acquire_lock(rule_lock);
3992 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3994 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3995 u8 *addr = fi->l_data.mac.mac_addr;
3997 /* Update unicast Tx rules to reflect the selected
4000 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4001 (fi->fltr_act == ICE_FWD_TO_VSI ||
4002 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4003 fi->fltr_act == ICE_FWD_TO_Q ||
4004 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4005 status = ice_update_pkt_fwd_rule(hw, fi);
4011 ice_release_lock(rule_lock);
4017 * ice_add_update_vsi_list
4018 * @hw: pointer to the hardware structure
4019 * @m_entry: pointer to current filter management list entry
4020 * @cur_fltr: filter information from the book keeping entry
4021 * @new_fltr: filter information with the new VSI to be added
4023 * Call AQ command to add or update previously created VSI list with new VSI.
4025 * Helper function to do book keeping associated with adding filter information
4026 * The algorithm to do the book keeping is described below :
4027 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4028 * if only one VSI has been added till now
4029 * Allocate a new VSI list and add two VSIs
4030 * to this list using switch rule command
4031 * Update the previously created switch rule with the
4032 * newly created VSI list ID
4033 * if a VSI list was previously created
4034 * Add the new VSI to the previously created VSI list set
4035 * using the update switch rule command
4037 static enum ice_status
4038 ice_add_update_vsi_list(struct ice_hw *hw,
4039 struct ice_fltr_mgmt_list_entry *m_entry,
4040 struct ice_fltr_info *cur_fltr,
4041 struct ice_fltr_info *new_fltr)
4043 enum ice_status status = ICE_SUCCESS;
4044 u16 vsi_list_id = 0;
4046 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4047 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4048 return ICE_ERR_NOT_IMPL;
4050 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4051 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4052 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4053 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4054 return ICE_ERR_NOT_IMPL;
4056 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4057 /* Only one entry existed in the mapping and it was not already
4058 * a part of a VSI list. So, create a VSI list with the old and
4061 struct ice_fltr_info tmp_fltr;
4062 u16 vsi_handle_arr[2];
4064 /* A rule already exists with the new VSI being added */
4065 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4066 return ICE_ERR_ALREADY_EXISTS;
4068 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4069 vsi_handle_arr[1] = new_fltr->vsi_handle;
4070 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4072 new_fltr->lkup_type);
4076 tmp_fltr = *new_fltr;
4077 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4078 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4079 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4080 /* Update the previous switch rule of "MAC forward to VSI" to
4081 * "MAC fwd to VSI list"
4083 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4087 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4088 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4089 m_entry->vsi_list_info =
4090 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4093 /* If this entry was large action then the large action needs
4094 * to be updated to point to FWD to VSI list
4096 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4098 ice_add_marker_act(hw, m_entry,
4099 m_entry->sw_marker_id,
4100 m_entry->lg_act_idx);
4102 u16 vsi_handle = new_fltr->vsi_handle;
4103 enum ice_adminq_opc opcode;
4105 if (!m_entry->vsi_list_info)
4108 /* A rule already exists with the new VSI being added */
4109 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4112 /* Update the previously created VSI list set with
4113 * the new VSI ID passed in
4115 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4116 opcode = ice_aqc_opc_update_sw_rules;
4118 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4119 vsi_list_id, false, opcode,
4120 new_fltr->lkup_type);
4121 /* update VSI list mapping info with new VSI ID */
4123 ice_set_bit(vsi_handle,
4124 m_entry->vsi_list_info->vsi_map);
4127 m_entry->vsi_count++;
4132 * ice_find_rule_entry - Search a rule entry
4133 * @list_head: head of rule list
4134 * @f_info: rule information
4136 * Helper function to search for a given rule entry
4137 * Returns pointer to entry storing the rule if found
4139 static struct ice_fltr_mgmt_list_entry *
4140 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4141 struct ice_fltr_info *f_info)
4143 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4145 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4147 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4148 sizeof(f_info->l_data)) &&
4149 f_info->flag == list_itr->fltr_info.flag) {
4158 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4159 * @recp_list: VSI lists needs to be searched
4160 * @vsi_handle: VSI handle to be found in VSI list
4161 * @vsi_list_id: VSI list ID found containing vsi_handle
4163 * Helper function to search a VSI list with single entry containing given VSI
4164 * handle element. This can be extended further to search VSI list with more
4165 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4167 static struct ice_vsi_list_map_info *
4168 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4171 struct ice_vsi_list_map_info *map_info = NULL;
4172 struct LIST_HEAD_TYPE *list_head;
4174 list_head = &recp_list->filt_rules;
4175 if (recp_list->adv_rule) {
4176 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4178 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4179 ice_adv_fltr_mgmt_list_entry,
4181 if (list_itr->vsi_list_info) {
4182 map_info = list_itr->vsi_list_info;
4183 if (ice_is_bit_set(map_info->vsi_map,
4185 *vsi_list_id = map_info->vsi_list_id;
4191 struct ice_fltr_mgmt_list_entry *list_itr;
4193 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4194 ice_fltr_mgmt_list_entry,
4196 if (list_itr->vsi_count == 1 &&
4197 list_itr->vsi_list_info) {
4198 map_info = list_itr->vsi_list_info;
4199 if (ice_is_bit_set(map_info->vsi_map,
4201 *vsi_list_id = map_info->vsi_list_id;
4211 * ice_add_rule_internal - add rule for a given lookup type
4212 * @hw: pointer to the hardware structure
4213 * @recp_list: recipe list for which rule has to be added
4214 * @lport: logic port number on which function add rule
4215 * @f_entry: structure containing MAC forwarding information
4217 * Adds or updates the rule lists for a given recipe
4219 static enum ice_status
4220 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4221 u8 lport, struct ice_fltr_list_entry *f_entry)
4223 struct ice_fltr_info *new_fltr, *cur_fltr;
4224 struct ice_fltr_mgmt_list_entry *m_entry;
4225 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4226 enum ice_status status = ICE_SUCCESS;
4228 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4229 return ICE_ERR_PARAM;
4231 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4232 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4233 f_entry->fltr_info.fwd_id.hw_vsi_id =
4234 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4236 rule_lock = &recp_list->filt_rule_lock;
4238 ice_acquire_lock(rule_lock);
4239 new_fltr = &f_entry->fltr_info;
4240 if (new_fltr->flag & ICE_FLTR_RX)
4241 new_fltr->src = lport;
4242 else if (new_fltr->flag & ICE_FLTR_TX)
4244 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4246 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4248 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4249 goto exit_add_rule_internal;
4252 cur_fltr = &m_entry->fltr_info;
4253 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4255 exit_add_rule_internal:
4256 ice_release_lock(rule_lock);
4261 * ice_remove_vsi_list_rule
4262 * @hw: pointer to the hardware structure
4263 * @vsi_list_id: VSI list ID generated as part of allocate resource
4264 * @lkup_type: switch rule filter lookup type
4266 * The VSI list should be emptied before this function is called to remove the
4269 static enum ice_status
4270 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4271 enum ice_sw_lkup_type lkup_type)
4273 /* Free the vsi_list resource that we allocated. It is assumed that the
4274 * list is empty at this point.
4276 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4277 ice_aqc_opc_free_res);
4281 * ice_rem_update_vsi_list
4282 * @hw: pointer to the hardware structure
4283 * @vsi_handle: VSI handle of the VSI to remove
4284 * @fm_list: filter management entry for which the VSI list management needs to
4287 static enum ice_status
4288 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4289 struct ice_fltr_mgmt_list_entry *fm_list)
4291 enum ice_sw_lkup_type lkup_type;
4292 enum ice_status status = ICE_SUCCESS;
4295 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4296 fm_list->vsi_count == 0)
4297 return ICE_ERR_PARAM;
4299 /* A rule with the VSI being removed does not exist */
4300 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4301 return ICE_ERR_DOES_NOT_EXIST;
4303 lkup_type = fm_list->fltr_info.lkup_type;
4304 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4305 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4306 ice_aqc_opc_update_sw_rules,
4311 fm_list->vsi_count--;
4312 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4314 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4315 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4316 struct ice_vsi_list_map_info *vsi_list_info =
4317 fm_list->vsi_list_info;
4320 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4322 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4323 return ICE_ERR_OUT_OF_RANGE;
4325 /* Make sure VSI list is empty before removing it below */
4326 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4328 ice_aqc_opc_update_sw_rules,
4333 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4334 tmp_fltr_info.fwd_id.hw_vsi_id =
4335 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4336 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4337 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4339 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4340 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4344 fm_list->fltr_info = tmp_fltr_info;
4347 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4348 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4349 struct ice_vsi_list_map_info *vsi_list_info =
4350 fm_list->vsi_list_info;
4352 /* Remove the VSI list since it is no longer used */
4353 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4355 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4356 vsi_list_id, status);
4360 LIST_DEL(&vsi_list_info->list_entry);
4361 ice_free(hw, vsi_list_info);
4362 fm_list->vsi_list_info = NULL;
4369 * ice_remove_rule_internal - Remove a filter rule of a given type
4371 * @hw: pointer to the hardware structure
4372 * @recp_list: recipe list for which the rule needs to removed
4373 * @f_entry: rule entry containing filter information
4375 static enum ice_status
4376 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4377 struct ice_fltr_list_entry *f_entry)
4379 struct ice_fltr_mgmt_list_entry *list_elem;
4380 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4381 enum ice_status status = ICE_SUCCESS;
4382 bool remove_rule = false;
4385 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4386 return ICE_ERR_PARAM;
4387 f_entry->fltr_info.fwd_id.hw_vsi_id =
4388 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4390 rule_lock = &recp_list->filt_rule_lock;
4391 ice_acquire_lock(rule_lock);
4392 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4393 &f_entry->fltr_info);
4395 status = ICE_ERR_DOES_NOT_EXIST;
4399 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4401 } else if (!list_elem->vsi_list_info) {
4402 status = ICE_ERR_DOES_NOT_EXIST;
4404 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4405 /* a ref_cnt > 1 indicates that the vsi_list is being
4406 * shared by multiple rules. Decrement the ref_cnt and
4407 * remove this rule, but do not modify the list, as it
4408 * is in-use by other rules.
4410 list_elem->vsi_list_info->ref_cnt--;
4413 /* a ref_cnt of 1 indicates the vsi_list is only used
4414 * by one rule. However, the original removal request is only
4415 * for a single VSI. Update the vsi_list first, and only
4416 * remove the rule if there are no further VSIs in this list.
4418 vsi_handle = f_entry->fltr_info.vsi_handle;
4419 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4422 /* if VSI count goes to zero after updating the VSI list */
4423 if (list_elem->vsi_count == 0)
4428 /* Remove the lookup rule */
4429 struct ice_aqc_sw_rules_elem *s_rule;
4431 s_rule = (struct ice_aqc_sw_rules_elem *)
4432 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4434 status = ICE_ERR_NO_MEMORY;
4438 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4439 ice_aqc_opc_remove_sw_rules);
4441 status = ice_aq_sw_rules(hw, s_rule,
4442 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4443 ice_aqc_opc_remove_sw_rules, NULL);
4445 /* Remove a book keeping from the list */
4446 ice_free(hw, s_rule);
4451 LIST_DEL(&list_elem->list_entry);
4452 ice_free(hw, list_elem);
4455 ice_release_lock(rule_lock);
4460 * ice_aq_get_res_alloc - get allocated resources
4461 * @hw: pointer to the HW struct
4462 * @num_entries: pointer to u16 to store the number of resource entries returned
4463 * @buf: pointer to buffer
4464 * @buf_size: size of buf
4465 * @cd: pointer to command details structure or NULL
4467 * The caller-supplied buffer must be large enough to store the resource
4468 * information for all resource types. Each resource type is an
4469 * ice_aqc_get_res_resp_elem structure.
4472 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4473 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4474 struct ice_sq_cd *cd)
4476 struct ice_aqc_get_res_alloc *resp;
4477 enum ice_status status;
4478 struct ice_aq_desc desc;
4481 return ICE_ERR_BAD_PTR;
4483 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4484 return ICE_ERR_INVAL_SIZE;
4486 resp = &desc.params.get_res;
4488 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4489 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4491 if (!status && num_entries)
4492 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4498 * ice_aq_get_res_descs - get allocated resource descriptors
4499 * @hw: pointer to the hardware structure
4500 * @num_entries: number of resource entries in buffer
4501 * @buf: structure to hold response data buffer
4502 * @buf_size: size of buffer
4503 * @res_type: resource type
4504 * @res_shared: is resource shared
4505 * @desc_id: input - first desc ID to start; output - next desc ID
4506 * @cd: pointer to command details structure or NULL
4509 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4510 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4511 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4513 struct ice_aqc_get_allocd_res_desc *cmd;
4514 struct ice_aq_desc desc;
4515 enum ice_status status;
4517 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4519 cmd = &desc.params.get_res_desc;
4522 return ICE_ERR_PARAM;
4524 if (buf_size != (num_entries * sizeof(*buf)))
4525 return ICE_ERR_PARAM;
4527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4529 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4530 ICE_AQC_RES_TYPE_M) | (res_shared ?
4531 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4532 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4534 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4536 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4542 * ice_add_mac_rule - Add a MAC address based filter rule
4543 * @hw: pointer to the hardware structure
4544 * @m_list: list of MAC addresses and forwarding information
4545 * @sw: pointer to switch info struct for which function add rule
4546 * @lport: logic port number on which function add rule
4548 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4549 * multiple unicast addresses, the function assumes that all the
4550 * addresses are unique in a given add_mac call. It doesn't
4551 * check for duplicates in this case, removing duplicates from a given
4552 * list should be taken care of in the caller of this function.
4554 static enum ice_status
4555 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4556 struct ice_switch_info *sw, u8 lport)
4558 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4559 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4560 struct ice_fltr_list_entry *m_list_itr;
4561 struct LIST_HEAD_TYPE *rule_head;
4562 u16 total_elem_left, s_rule_size;
4563 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4564 enum ice_status status = ICE_SUCCESS;
4565 u16 num_unicast = 0;
4569 rule_lock = &recp_list->filt_rule_lock;
4570 rule_head = &recp_list->filt_rules;
4572 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4574 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4578 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4579 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4580 if (!ice_is_vsi_valid(hw, vsi_handle))
4581 return ICE_ERR_PARAM;
4582 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4583 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4584 /* update the src in case it is VSI num */
4585 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4586 return ICE_ERR_PARAM;
4587 m_list_itr->fltr_info.src = hw_vsi_id;
4588 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4589 IS_ZERO_ETHER_ADDR(add))
4590 return ICE_ERR_PARAM;
4591 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4592 /* Don't overwrite the unicast address */
4593 ice_acquire_lock(rule_lock);
4594 if (ice_find_rule_entry(rule_head,
4595 &m_list_itr->fltr_info)) {
4596 ice_release_lock(rule_lock);
4597 return ICE_ERR_ALREADY_EXISTS;
4599 ice_release_lock(rule_lock);
4601 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4602 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4603 m_list_itr->status =
4604 ice_add_rule_internal(hw, recp_list, lport,
4606 if (m_list_itr->status)
4607 return m_list_itr->status;
4611 ice_acquire_lock(rule_lock);
4612 /* Exit if no suitable entries were found for adding bulk switch rule */
4614 status = ICE_SUCCESS;
4615 goto ice_add_mac_exit;
4618 /* Allocate switch rule buffer for the bulk update for unicast */
4619 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4620 s_rule = (struct ice_aqc_sw_rules_elem *)
4621 ice_calloc(hw, num_unicast, s_rule_size);
4623 status = ICE_ERR_NO_MEMORY;
4624 goto ice_add_mac_exit;
4628 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4630 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4631 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4633 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4634 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4635 ice_aqc_opc_add_sw_rules);
4636 r_iter = (struct ice_aqc_sw_rules_elem *)
4637 ((u8 *)r_iter + s_rule_size);
4641 /* Call AQ bulk switch rule update for all unicast addresses */
4643 /* Call AQ switch rule in AQ_MAX chunk */
4644 for (total_elem_left = num_unicast; total_elem_left > 0;
4645 total_elem_left -= elem_sent) {
4646 struct ice_aqc_sw_rules_elem *entry = r_iter;
4648 elem_sent = MIN_T(u8, total_elem_left,
4649 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4650 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4651 elem_sent, ice_aqc_opc_add_sw_rules,
4654 goto ice_add_mac_exit;
4655 r_iter = (struct ice_aqc_sw_rules_elem *)
4656 ((u8 *)r_iter + (elem_sent * s_rule_size));
4659 /* Fill up rule ID based on the value returned from FW */
4661 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4663 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4664 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4665 struct ice_fltr_mgmt_list_entry *fm_entry;
4667 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4668 f_info->fltr_rule_id =
4669 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4670 f_info->fltr_act = ICE_FWD_TO_VSI;
4671 /* Create an entry to track this MAC address */
4672 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4673 ice_malloc(hw, sizeof(*fm_entry));
4675 status = ICE_ERR_NO_MEMORY;
4676 goto ice_add_mac_exit;
4678 fm_entry->fltr_info = *f_info;
4679 fm_entry->vsi_count = 1;
4680 /* The book keeping entries will get removed when
4681 * base driver calls remove filter AQ command
4684 LIST_ADD(&fm_entry->list_entry, rule_head);
4685 r_iter = (struct ice_aqc_sw_rules_elem *)
4686 ((u8 *)r_iter + s_rule_size);
4691 ice_release_lock(rule_lock);
4693 ice_free(hw, s_rule);
4698 * ice_add_mac - Add a MAC address based filter rule
4699 * @hw: pointer to the hardware structure
4700 * @m_list: list of MAC addresses and forwarding information
4702 * Function add MAC rule for logical port from HW struct
4704 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4707 return ICE_ERR_PARAM;
4709 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4710 hw->port_info->lport);
4714 * ice_add_vlan_internal - Add one VLAN based filter rule
4715 * @hw: pointer to the hardware structure
4716 * @recp_list: recipe list for which rule has to be added
4717 * @f_entry: filter entry containing one VLAN information
4719 static enum ice_status
4720 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4721 struct ice_fltr_list_entry *f_entry)
4723 struct ice_fltr_mgmt_list_entry *v_list_itr;
4724 struct ice_fltr_info *new_fltr, *cur_fltr;
4725 enum ice_sw_lkup_type lkup_type;
4726 u16 vsi_list_id = 0, vsi_handle;
4727 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4728 enum ice_status status = ICE_SUCCESS;
4730 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4731 return ICE_ERR_PARAM;
4733 f_entry->fltr_info.fwd_id.hw_vsi_id =
4734 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4735 new_fltr = &f_entry->fltr_info;
4737 /* VLAN ID should only be 12 bits */
4738 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4739 return ICE_ERR_PARAM;
4741 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4742 return ICE_ERR_PARAM;
4744 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4745 lkup_type = new_fltr->lkup_type;
4746 vsi_handle = new_fltr->vsi_handle;
4747 rule_lock = &recp_list->filt_rule_lock;
4748 ice_acquire_lock(rule_lock);
4749 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4751 struct ice_vsi_list_map_info *map_info = NULL;
4753 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4754 /* All VLAN pruning rules use a VSI list. Check if
4755 * there is already a VSI list containing VSI that we
4756 * want to add. If found, use the same vsi_list_id for
4757 * this new VLAN rule or else create a new list.
4759 map_info = ice_find_vsi_list_entry(recp_list,
4763 status = ice_create_vsi_list_rule(hw,
4771 /* Convert the action to forwarding to a VSI list. */
4772 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4773 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4776 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4778 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4781 status = ICE_ERR_DOES_NOT_EXIST;
4784 /* reuse VSI list for new rule and increment ref_cnt */
4786 v_list_itr->vsi_list_info = map_info;
4787 map_info->ref_cnt++;
4789 v_list_itr->vsi_list_info =
4790 ice_create_vsi_list_map(hw, &vsi_handle,
4794 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4795 /* Update existing VSI list to add new VSI ID only if it used
4798 cur_fltr = &v_list_itr->fltr_info;
4799 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4802 /* If VLAN rule exists and VSI list being used by this rule is
4803 * referenced by more than 1 VLAN rule. Then create a new VSI
4804 * list appending previous VSI with new VSI and update existing
4805 * VLAN rule to point to new VSI list ID
4807 struct ice_fltr_info tmp_fltr;
4808 u16 vsi_handle_arr[2];
4811 /* Current implementation only supports reusing VSI list with
4812 * one VSI count. We should never hit below condition
4814 if (v_list_itr->vsi_count > 1 &&
4815 v_list_itr->vsi_list_info->ref_cnt > 1) {
4816 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4817 status = ICE_ERR_CFG;
4822 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4825 /* A rule already exists with the new VSI being added */
4826 if (cur_handle == vsi_handle) {
4827 status = ICE_ERR_ALREADY_EXISTS;
4831 vsi_handle_arr[0] = cur_handle;
4832 vsi_handle_arr[1] = vsi_handle;
4833 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4834 &vsi_list_id, lkup_type);
4838 tmp_fltr = v_list_itr->fltr_info;
4839 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4840 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4841 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4842 /* Update the previous switch rule to a new VSI list which
4843 * includes current VSI that is requested
4845 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4849 /* before overriding VSI list map info. decrement ref_cnt of
4852 v_list_itr->vsi_list_info->ref_cnt--;
4854 /* now update to newly created list */
4855 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4856 v_list_itr->vsi_list_info =
4857 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4859 v_list_itr->vsi_count++;
4863 ice_release_lock(rule_lock);
4868 * ice_add_vlan_rule - Add VLAN based filter rule
4869 * @hw: pointer to the hardware structure
4870 * @v_list: list of VLAN entries and forwarding information
4871 * @sw: pointer to switch info struct for which function add rule
4873 static enum ice_status
4874 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4875 struct ice_switch_info *sw)
4877 struct ice_fltr_list_entry *v_list_itr;
4878 struct ice_sw_recipe *recp_list;
4880 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4881 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4883 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4884 return ICE_ERR_PARAM;
4885 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4886 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4888 if (v_list_itr->status)
4889 return v_list_itr->status;
4895 * ice_add_vlan - Add a VLAN based filter rule
4896 * @hw: pointer to the hardware structure
4897 * @v_list: list of VLAN and forwarding information
4899 * Function add VLAN rule for logical port from HW struct
4901 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4904 return ICE_ERR_PARAM;
4906 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4910 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4911 * @hw: pointer to the hardware structure
4912 * @mv_list: list of MAC and VLAN filters
4913 * @sw: pointer to switch info struct for which function add rule
4914 * @lport: logic port number on which function add rule
4916 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4917 * pruning bits enabled, then it is the responsibility of the caller to make
4918 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4919 * VLAN won't be received on that VSI otherwise.
4921 static enum ice_status
4922 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4923 struct ice_switch_info *sw, u8 lport)
4925 struct ice_fltr_list_entry *mv_list_itr;
4926 struct ice_sw_recipe *recp_list;
4928 if (!mv_list || !hw)
4929 return ICE_ERR_PARAM;
4931 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4932 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4934 enum ice_sw_lkup_type l_type =
4935 mv_list_itr->fltr_info.lkup_type;
4937 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4938 return ICE_ERR_PARAM;
4939 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4940 mv_list_itr->status =
4941 ice_add_rule_internal(hw, recp_list, lport,
4943 if (mv_list_itr->status)
4944 return mv_list_itr->status;
4950 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4951 * @hw: pointer to the hardware structure
4952 * @mv_list: list of MAC VLAN addresses and forwarding information
4954 * Function add MAC VLAN rule for logical port from HW struct
4957 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4959 if (!mv_list || !hw)
4960 return ICE_ERR_PARAM;
4962 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4963 hw->port_info->lport);
4967 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4968 * @hw: pointer to the hardware structure
4969 * @em_list: list of ether type MAC filter, MAC is optional
4970 * @sw: pointer to switch info struct for which function add rule
4971 * @lport: logic port number on which function add rule
4973 * This function requires the caller to populate the entries in
4974 * the filter list with the necessary fields (including flags to
4975 * indicate Tx or Rx rules).
4977 static enum ice_status
4978 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4979 struct ice_switch_info *sw, u8 lport)
4981 struct ice_fltr_list_entry *em_list_itr;
4983 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4985 struct ice_sw_recipe *recp_list;
4986 enum ice_sw_lkup_type l_type;
4988 l_type = em_list_itr->fltr_info.lkup_type;
4989 recp_list = &sw->recp_list[l_type];
4991 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4992 l_type != ICE_SW_LKUP_ETHERTYPE)
4993 return ICE_ERR_PARAM;
4995 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4998 if (em_list_itr->status)
4999 return em_list_itr->status;
5005 * ice_add_eth_mac - Add a ethertype based filter rule
5006 * @hw: pointer to the hardware structure
5007 * @em_list: list of ethertype and forwarding information
5009 * Function add ethertype rule for logical port from HW struct
5012 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5014 if (!em_list || !hw)
5015 return ICE_ERR_PARAM;
5017 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5018 hw->port_info->lport);
5022 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5023 * @hw: pointer to the hardware structure
5024 * @em_list: list of ethertype or ethertype MAC entries
5025 * @sw: pointer to switch info struct for which function add rule
5027 static enum ice_status
5028 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5029 struct ice_switch_info *sw)
5031 struct ice_fltr_list_entry *em_list_itr, *tmp;
5033 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5035 struct ice_sw_recipe *recp_list;
5036 enum ice_sw_lkup_type l_type;
5038 l_type = em_list_itr->fltr_info.lkup_type;
5040 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5041 l_type != ICE_SW_LKUP_ETHERTYPE)
5042 return ICE_ERR_PARAM;
5044 recp_list = &sw->recp_list[l_type];
5045 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5047 if (em_list_itr->status)
5048 return em_list_itr->status;
5054 * ice_remove_eth_mac - remove a ethertype based filter rule
5055 * @hw: pointer to the hardware structure
5056 * @em_list: list of ethertype and forwarding information
5060 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5062 if (!em_list || !hw)
5063 return ICE_ERR_PARAM;
5065 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5069 * ice_rem_sw_rule_info
5070 * @hw: pointer to the hardware structure
5071 * @rule_head: pointer to the switch list structure that we want to delete
5074 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5076 if (!LIST_EMPTY(rule_head)) {
5077 struct ice_fltr_mgmt_list_entry *entry;
5078 struct ice_fltr_mgmt_list_entry *tmp;
5080 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5081 ice_fltr_mgmt_list_entry, list_entry) {
5082 LIST_DEL(&entry->list_entry);
5083 ice_free(hw, entry);
5089 * ice_rem_adv_rule_info
5090 * @hw: pointer to the hardware structure
5091 * @rule_head: pointer to the switch list structure that we want to delete
5094 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5096 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5097 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5099 if (LIST_EMPTY(rule_head))
5102 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5103 ice_adv_fltr_mgmt_list_entry, list_entry) {
5104 LIST_DEL(&lst_itr->list_entry);
5105 ice_free(hw, lst_itr->lkups);
5106 ice_free(hw, lst_itr);
5111 * ice_rem_all_sw_rules_info
5112 * @hw: pointer to the hardware structure
5114 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5116 struct ice_switch_info *sw = hw->switch_info;
5119 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5120 struct LIST_HEAD_TYPE *rule_head;
5122 rule_head = &sw->recp_list[i].filt_rules;
5123 if (!sw->recp_list[i].adv_rule)
5124 ice_rem_sw_rule_info(hw, rule_head);
5126 ice_rem_adv_rule_info(hw, rule_head);
5127 if (sw->recp_list[i].adv_rule &&
5128 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5129 sw->recp_list[i].adv_rule = false;
5134 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5135 * @pi: pointer to the port_info structure
5136 * @vsi_handle: VSI handle to set as default
5137 * @set: true to add the above mentioned switch rule, false to remove it
5138 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5140 * add filter rule to set/unset given VSI as default VSI for the switch
5141 * (represented by swid)
5144 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5147 struct ice_aqc_sw_rules_elem *s_rule;
5148 struct ice_fltr_info f_info;
5149 struct ice_hw *hw = pi->hw;
5150 enum ice_adminq_opc opcode;
5151 enum ice_status status;
5155 if (!ice_is_vsi_valid(hw, vsi_handle))
5156 return ICE_ERR_PARAM;
5157 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5159 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5160 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5162 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5164 return ICE_ERR_NO_MEMORY;
5166 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5168 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5169 f_info.flag = direction;
5170 f_info.fltr_act = ICE_FWD_TO_VSI;
5171 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5173 if (f_info.flag & ICE_FLTR_RX) {
5174 f_info.src = pi->lport;
5175 f_info.src_id = ICE_SRC_ID_LPORT;
5177 f_info.fltr_rule_id =
5178 pi->dflt_rx_vsi_rule_id;
5179 } else if (f_info.flag & ICE_FLTR_TX) {
5180 f_info.src_id = ICE_SRC_ID_VSI;
5181 f_info.src = hw_vsi_id;
5183 f_info.fltr_rule_id =
5184 pi->dflt_tx_vsi_rule_id;
5188 opcode = ice_aqc_opc_add_sw_rules;
5190 opcode = ice_aqc_opc_remove_sw_rules;
5192 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5194 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5195 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5198 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5200 if (f_info.flag & ICE_FLTR_TX) {
5201 pi->dflt_tx_vsi_num = hw_vsi_id;
5202 pi->dflt_tx_vsi_rule_id = index;
5203 } else if (f_info.flag & ICE_FLTR_RX) {
5204 pi->dflt_rx_vsi_num = hw_vsi_id;
5205 pi->dflt_rx_vsi_rule_id = index;
5208 if (f_info.flag & ICE_FLTR_TX) {
5209 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5210 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5211 } else if (f_info.flag & ICE_FLTR_RX) {
5212 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5213 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5218 ice_free(hw, s_rule);
5223 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5224 * @list_head: head of rule list
5225 * @f_info: rule information
5227 * Helper function to search for a unicast rule entry - this is to be used
5228 * to remove unicast MAC filter that is not shared with other VSIs on the
5231 * Returns pointer to entry storing the rule if found
5233 static struct ice_fltr_mgmt_list_entry *
5234 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5235 struct ice_fltr_info *f_info)
5237 struct ice_fltr_mgmt_list_entry *list_itr;
5239 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5241 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5242 sizeof(f_info->l_data)) &&
5243 f_info->fwd_id.hw_vsi_id ==
5244 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5245 f_info->flag == list_itr->fltr_info.flag)
5252 * ice_remove_mac_rule - remove a MAC based filter rule
5253 * @hw: pointer to the hardware structure
5254 * @m_list: list of MAC addresses and forwarding information
5255 * @recp_list: list from which function remove MAC address
5257 * This function removes either a MAC filter rule or a specific VSI from a
5258 * VSI list for a multicast MAC address.
5260 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5261 * ice_add_mac. Caller should be aware that this call will only work if all
5262 * the entries passed into m_list were added previously. It will not attempt to
5263 * do a partial remove of entries that were found.
5265 static enum ice_status
5266 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5267 struct ice_sw_recipe *recp_list)
5269 struct ice_fltr_list_entry *list_itr, *tmp;
5270 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5273 return ICE_ERR_PARAM;
5275 rule_lock = &recp_list->filt_rule_lock;
5276 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5278 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5279 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5282 if (l_type != ICE_SW_LKUP_MAC)
5283 return ICE_ERR_PARAM;
5285 vsi_handle = list_itr->fltr_info.vsi_handle;
5286 if (!ice_is_vsi_valid(hw, vsi_handle))
5287 return ICE_ERR_PARAM;
5289 list_itr->fltr_info.fwd_id.hw_vsi_id =
5290 ice_get_hw_vsi_num(hw, vsi_handle);
5291 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5292 /* Don't remove the unicast address that belongs to
5293 * another VSI on the switch, since it is not being
5296 ice_acquire_lock(rule_lock);
5297 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5298 &list_itr->fltr_info)) {
5299 ice_release_lock(rule_lock);
5300 return ICE_ERR_DOES_NOT_EXIST;
5302 ice_release_lock(rule_lock);
5304 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5306 if (list_itr->status)
5307 return list_itr->status;
5313 * ice_remove_mac - remove a MAC address based filter rule
5314 * @hw: pointer to the hardware structure
5315 * @m_list: list of MAC addresses and forwarding information
5318 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5320 struct ice_sw_recipe *recp_list;
5322 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5323 return ice_remove_mac_rule(hw, m_list, recp_list);
5327 * ice_remove_vlan_rule - Remove VLAN based filter rule
5328 * @hw: pointer to the hardware structure
5329 * @v_list: list of VLAN entries and forwarding information
5330 * @recp_list: list from which function remove VLAN
5332 static enum ice_status
5333 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5334 struct ice_sw_recipe *recp_list)
5336 struct ice_fltr_list_entry *v_list_itr, *tmp;
5338 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5340 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5342 if (l_type != ICE_SW_LKUP_VLAN)
5343 return ICE_ERR_PARAM;
5344 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5346 if (v_list_itr->status)
5347 return v_list_itr->status;
5353 * ice_remove_vlan - remove a VLAN address based filter rule
5354 * @hw: pointer to the hardware structure
5355 * @v_list: list of VLAN and forwarding information
5359 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5361 struct ice_sw_recipe *recp_list;
5364 return ICE_ERR_PARAM;
5366 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5367 return ice_remove_vlan_rule(hw, v_list, recp_list);
5371 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5372 * @hw: pointer to the hardware structure
5373 * @v_list: list of MAC VLAN entries and forwarding information
5374 * @recp_list: list from which function remove MAC VLAN
5376 static enum ice_status
5377 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5378 struct ice_sw_recipe *recp_list)
5380 struct ice_fltr_list_entry *v_list_itr, *tmp;
5382 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5383 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5385 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5387 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5388 return ICE_ERR_PARAM;
5389 v_list_itr->status =
5390 ice_remove_rule_internal(hw, recp_list,
5392 if (v_list_itr->status)
5393 return v_list_itr->status;
5399 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5400 * @hw: pointer to the hardware structure
5401 * @mv_list: list of MAC VLAN and forwarding information
5404 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5406 struct ice_sw_recipe *recp_list;
5408 if (!mv_list || !hw)
5409 return ICE_ERR_PARAM;
5411 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5412 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5416 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5417 * @fm_entry: filter entry to inspect
5418 * @vsi_handle: VSI handle to compare with filter info
5421 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5423 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5424 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5425 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5426 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5431 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5432 * @hw: pointer to the hardware structure
5433 * @vsi_handle: VSI handle to remove filters from
5434 * @vsi_list_head: pointer to the list to add entry to
5435 * @fi: pointer to fltr_info of filter entry to copy & add
5437 * Helper function, used when creating a list of filters to remove from
5438 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5439 * original filter entry, with the exception of fltr_info.fltr_act and
5440 * fltr_info.fwd_id fields. These are set such that later logic can
5441 * extract which VSI to remove the fltr from, and pass on that information.
5443 static enum ice_status
5444 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5445 struct LIST_HEAD_TYPE *vsi_list_head,
5446 struct ice_fltr_info *fi)
5448 struct ice_fltr_list_entry *tmp;
5450 /* this memory is freed up in the caller function
5451 * once filters for this VSI are removed
5453 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5455 return ICE_ERR_NO_MEMORY;
5457 tmp->fltr_info = *fi;
5459 /* Overwrite these fields to indicate which VSI to remove filter from,
5460 * so find and remove logic can extract the information from the
5461 * list entries. Note that original entries will still have proper
5464 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5465 tmp->fltr_info.vsi_handle = vsi_handle;
5466 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5468 LIST_ADD(&tmp->list_entry, vsi_list_head);
5474 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5475 * @hw: pointer to the hardware structure
5476 * @vsi_handle: VSI handle to remove filters from
5477 * @lkup_list_head: pointer to the list that has certain lookup type filters
5478 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5480 * Locates all filters in lkup_list_head that are used by the given VSI,
5481 * and adds COPIES of those entries to vsi_list_head (intended to be used
5482 * to remove the listed filters).
5483 * Note that this means all entries in vsi_list_head must be explicitly
5484 * deallocated by the caller when done with list.
5486 static enum ice_status
5487 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5488 struct LIST_HEAD_TYPE *lkup_list_head,
5489 struct LIST_HEAD_TYPE *vsi_list_head)
5491 struct ice_fltr_mgmt_list_entry *fm_entry;
5492 enum ice_status status = ICE_SUCCESS;
5494 /* check to make sure VSI ID is valid and within boundary */
5495 if (!ice_is_vsi_valid(hw, vsi_handle))
5496 return ICE_ERR_PARAM;
5498 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5499 ice_fltr_mgmt_list_entry, list_entry) {
5500 struct ice_fltr_info *fi;
5502 fi = &fm_entry->fltr_info;
5503 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5506 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5515 * ice_determine_promisc_mask
5516 * @fi: filter info to parse
5518 * Helper function to determine which ICE_PROMISC_ mask corresponds
5519 * to given filter into.
5521 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5523 u16 vid = fi->l_data.mac_vlan.vlan_id;
5524 u8 *macaddr = fi->l_data.mac.mac_addr;
5525 bool is_tx_fltr = false;
5526 u8 promisc_mask = 0;
5528 if (fi->flag == ICE_FLTR_TX)
5531 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5532 promisc_mask |= is_tx_fltr ?
5533 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5534 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5535 promisc_mask |= is_tx_fltr ?
5536 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5537 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5538 promisc_mask |= is_tx_fltr ?
5539 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5541 promisc_mask |= is_tx_fltr ?
5542 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5544 return promisc_mask;
5548 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5549 * @hw: pointer to the hardware structure
5550 * @vsi_handle: VSI handle to retrieve info from
5551 * @promisc_mask: pointer to mask to be filled in
5552 * @vid: VLAN ID of promisc VLAN VSI
5553 * @sw: pointer to switch info struct for which function add rule
5555 static enum ice_status
5556 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5557 u16 *vid, struct ice_switch_info *sw)
5559 struct ice_fltr_mgmt_list_entry *itr;
5560 struct LIST_HEAD_TYPE *rule_head;
5561 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5563 if (!ice_is_vsi_valid(hw, vsi_handle))
5564 return ICE_ERR_PARAM;
5568 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5569 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5571 ice_acquire_lock(rule_lock);
5572 LIST_FOR_EACH_ENTRY(itr, rule_head,
5573 ice_fltr_mgmt_list_entry, list_entry) {
5574 /* Continue if this filter doesn't apply to this VSI or the
5575 * VSI ID is not in the VSI map for this filter
5577 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5580 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5582 ice_release_lock(rule_lock);
5588 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5589 * @hw: pointer to the hardware structure
5590 * @vsi_handle: VSI handle to retrieve info from
5591 * @promisc_mask: pointer to mask to be filled in
5592 * @vid: VLAN ID of promisc VLAN VSI
5595 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5598 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5599 vid, hw->switch_info);
5603 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5604 * @hw: pointer to the hardware structure
5605 * @vsi_handle: VSI handle to retrieve info from
5606 * @promisc_mask: pointer to mask to be filled in
5607 * @vid: VLAN ID of promisc VLAN VSI
5608 * @sw: pointer to switch info struct for which function add rule
5610 static enum ice_status
5611 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5612 u16 *vid, struct ice_switch_info *sw)
5614 struct ice_fltr_mgmt_list_entry *itr;
5615 struct LIST_HEAD_TYPE *rule_head;
5616 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5618 if (!ice_is_vsi_valid(hw, vsi_handle))
5619 return ICE_ERR_PARAM;
5623 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5624 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5626 ice_acquire_lock(rule_lock);
5627 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5629 /* Continue if this filter doesn't apply to this VSI or the
5630 * VSI ID is not in the VSI map for this filter
5632 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5635 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5637 ice_release_lock(rule_lock);
5643 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5644 * @hw: pointer to the hardware structure
5645 * @vsi_handle: VSI handle to retrieve info from
5646 * @promisc_mask: pointer to mask to be filled in
5647 * @vid: VLAN ID of promisc VLAN VSI
5650 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5653 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5654 vid, hw->switch_info);
5658 * ice_remove_promisc - Remove promisc based filter rules
5659 * @hw: pointer to the hardware structure
5660 * @recp_id: recipe ID for which the rule needs to removed
5661 * @v_list: list of promisc entries
5663 static enum ice_status
5664 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5665 struct LIST_HEAD_TYPE *v_list)
5667 struct ice_fltr_list_entry *v_list_itr, *tmp;
5668 struct ice_sw_recipe *recp_list;
5670 recp_list = &hw->switch_info->recp_list[recp_id];
5671 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5673 v_list_itr->status =
5674 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5675 if (v_list_itr->status)
5676 return v_list_itr->status;
5682 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5683 * @hw: pointer to the hardware structure
5684 * @vsi_handle: VSI handle to clear mode
5685 * @promisc_mask: mask of promiscuous config bits to clear
5686 * @vid: VLAN ID to clear VLAN promiscuous
5687 * @sw: pointer to switch info struct for which function add rule
5689 static enum ice_status
5690 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5691 u16 vid, struct ice_switch_info *sw)
5693 struct ice_fltr_list_entry *fm_entry, *tmp;
5694 struct LIST_HEAD_TYPE remove_list_head;
5695 struct ice_fltr_mgmt_list_entry *itr;
5696 struct LIST_HEAD_TYPE *rule_head;
5697 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5698 enum ice_status status = ICE_SUCCESS;
5701 if (!ice_is_vsi_valid(hw, vsi_handle))
5702 return ICE_ERR_PARAM;
5704 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5705 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5707 recipe_id = ICE_SW_LKUP_PROMISC;
5709 rule_head = &sw->recp_list[recipe_id].filt_rules;
5710 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5712 INIT_LIST_HEAD(&remove_list_head);
5714 ice_acquire_lock(rule_lock);
5715 LIST_FOR_EACH_ENTRY(itr, rule_head,
5716 ice_fltr_mgmt_list_entry, list_entry) {
5717 struct ice_fltr_info *fltr_info;
5718 u8 fltr_promisc_mask = 0;
5720 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5722 fltr_info = &itr->fltr_info;
5724 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5725 vid != fltr_info->l_data.mac_vlan.vlan_id)
5728 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5730 /* Skip if filter is not completely specified by given mask */
5731 if (fltr_promisc_mask & ~promisc_mask)
5734 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5738 ice_release_lock(rule_lock);
5739 goto free_fltr_list;
5742 ice_release_lock(rule_lock);
5744 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5747 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5748 ice_fltr_list_entry, list_entry) {
5749 LIST_DEL(&fm_entry->list_entry);
5750 ice_free(hw, fm_entry);
5757 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5758 * @hw: pointer to the hardware structure
5759 * @vsi_handle: VSI handle to clear mode
5760 * @promisc_mask: mask of promiscuous config bits to clear
5761 * @vid: VLAN ID to clear VLAN promiscuous
5764 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5765 u8 promisc_mask, u16 vid)
5767 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5768 vid, hw->switch_info);
5772 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5773 * @hw: pointer to the hardware structure
5774 * @vsi_handle: VSI handle to configure
5775 * @promisc_mask: mask of promiscuous config bits
5776 * @vid: VLAN ID to set VLAN promiscuous
5777 * @lport: logical port number to configure promisc mode
5778 * @sw: pointer to switch info struct for which function add rule
5780 static enum ice_status
5781 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5782 u16 vid, u8 lport, struct ice_switch_info *sw)
5784 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5785 struct ice_fltr_list_entry f_list_entry;
5786 struct ice_fltr_info new_fltr;
5787 enum ice_status status = ICE_SUCCESS;
5793 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5795 if (!ice_is_vsi_valid(hw, vsi_handle))
5796 return ICE_ERR_PARAM;
5797 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5799 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5801 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5802 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5803 new_fltr.l_data.mac_vlan.vlan_id = vid;
5804 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5806 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5807 recipe_id = ICE_SW_LKUP_PROMISC;
5810 /* Separate filters must be set for each direction/packet type
5811 * combination, so we will loop over the mask value, store the
5812 * individual type, and clear it out in the input mask as it
5815 while (promisc_mask) {
5816 struct ice_sw_recipe *recp_list;
5822 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5823 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5824 pkt_type = UCAST_FLTR;
5825 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5826 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5827 pkt_type = UCAST_FLTR;
5829 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5830 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5831 pkt_type = MCAST_FLTR;
5832 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5833 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5834 pkt_type = MCAST_FLTR;
5836 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5837 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5838 pkt_type = BCAST_FLTR;
5839 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5840 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5841 pkt_type = BCAST_FLTR;
5845 /* Check for VLAN promiscuous flag */
5846 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5847 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5848 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5849 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5853 /* Set filter DA based on packet type */
5854 mac_addr = new_fltr.l_data.mac.mac_addr;
5855 if (pkt_type == BCAST_FLTR) {
5856 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5857 } else if (pkt_type == MCAST_FLTR ||
5858 pkt_type == UCAST_FLTR) {
5859 /* Use the dummy ether header DA */
5860 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5861 ICE_NONDMA_TO_NONDMA);
5862 if (pkt_type == MCAST_FLTR)
5863 mac_addr[0] |= 0x1; /* Set multicast bit */
5866 /* Need to reset this to zero for all iterations */
5869 new_fltr.flag |= ICE_FLTR_TX;
5870 new_fltr.src = hw_vsi_id;
5872 new_fltr.flag |= ICE_FLTR_RX;
5873 new_fltr.src = lport;
5876 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5877 new_fltr.vsi_handle = vsi_handle;
5878 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5879 f_list_entry.fltr_info = new_fltr;
5880 recp_list = &sw->recp_list[recipe_id];
5882 status = ice_add_rule_internal(hw, recp_list, lport,
5884 if (status != ICE_SUCCESS)
5885 goto set_promisc_exit;
5893 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5894 * @hw: pointer to the hardware structure
5895 * @vsi_handle: VSI handle to configure
5896 * @promisc_mask: mask of promiscuous config bits
5897 * @vid: VLAN ID to set VLAN promiscuous
5900 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5903 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5904 hw->port_info->lport,
5909 * _ice_set_vlan_vsi_promisc
5910 * @hw: pointer to the hardware structure
5911 * @vsi_handle: VSI handle to configure
5912 * @promisc_mask: mask of promiscuous config bits
5913 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5914 * @lport: logical port number to configure promisc mode
5915 * @sw: pointer to switch info struct for which function add rule
5917 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5919 static enum ice_status
5920 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5921 bool rm_vlan_promisc, u8 lport,
5922 struct ice_switch_info *sw)
5924 struct ice_fltr_list_entry *list_itr, *tmp;
5925 struct LIST_HEAD_TYPE vsi_list_head;
5926 struct LIST_HEAD_TYPE *vlan_head;
5927 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5928 enum ice_status status;
5931 INIT_LIST_HEAD(&vsi_list_head);
5932 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5933 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5934 ice_acquire_lock(vlan_lock);
5935 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5937 ice_release_lock(vlan_lock);
5939 goto free_fltr_list;
5941 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5943 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5944 if (rm_vlan_promisc)
5945 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5949 status = _ice_set_vsi_promisc(hw, vsi_handle,
5950 promisc_mask, vlan_id,
5957 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5958 ice_fltr_list_entry, list_entry) {
5959 LIST_DEL(&list_itr->list_entry);
5960 ice_free(hw, list_itr);
5966 * ice_set_vlan_vsi_promisc
5967 * @hw: pointer to the hardware structure
5968 * @vsi_handle: VSI handle to configure
5969 * @promisc_mask: mask of promiscuous config bits
5970 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5972 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5975 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5976 bool rm_vlan_promisc)
5978 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5979 rm_vlan_promisc, hw->port_info->lport,
5984 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5985 * @hw: pointer to the hardware structure
5986 * @vsi_handle: VSI handle to remove filters from
5987 * @recp_list: recipe list from which function remove fltr
5988 * @lkup: switch rule filter lookup type
5991 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5992 struct ice_sw_recipe *recp_list,
5993 enum ice_sw_lkup_type lkup)
5995 struct ice_fltr_list_entry *fm_entry;
5996 struct LIST_HEAD_TYPE remove_list_head;
5997 struct LIST_HEAD_TYPE *rule_head;
5998 struct ice_fltr_list_entry *tmp;
5999 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6000 enum ice_status status;
6002 INIT_LIST_HEAD(&remove_list_head);
6003 rule_lock = &recp_list[lkup].filt_rule_lock;
6004 rule_head = &recp_list[lkup].filt_rules;
6005 ice_acquire_lock(rule_lock);
6006 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6008 ice_release_lock(rule_lock);
6013 case ICE_SW_LKUP_MAC:
6014 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6016 case ICE_SW_LKUP_VLAN:
6017 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6019 case ICE_SW_LKUP_PROMISC:
6020 case ICE_SW_LKUP_PROMISC_VLAN:
6021 ice_remove_promisc(hw, lkup, &remove_list_head);
6023 case ICE_SW_LKUP_MAC_VLAN:
6024 ice_remove_mac_vlan(hw, &remove_list_head);
6026 case ICE_SW_LKUP_ETHERTYPE:
6027 case ICE_SW_LKUP_ETHERTYPE_MAC:
6028 ice_remove_eth_mac(hw, &remove_list_head);
6030 case ICE_SW_LKUP_DFLT:
6031 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6033 case ICE_SW_LKUP_LAST:
6034 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6038 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6039 ice_fltr_list_entry, list_entry) {
6040 LIST_DEL(&fm_entry->list_entry);
6041 ice_free(hw, fm_entry);
6046 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6047 * @hw: pointer to the hardware structure
6048 * @vsi_handle: VSI handle to remove filters from
6049 * @sw: pointer to switch info struct
6052 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6053 struct ice_switch_info *sw)
6055 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6057 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6058 sw->recp_list, ICE_SW_LKUP_MAC);
6059 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6060 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6061 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6062 sw->recp_list, ICE_SW_LKUP_PROMISC);
6063 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6064 sw->recp_list, ICE_SW_LKUP_VLAN);
6065 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6066 sw->recp_list, ICE_SW_LKUP_DFLT);
6067 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6068 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6069 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6070 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6071 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6072 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6076 * ice_remove_vsi_fltr - Remove all filters for a VSI
6077 * @hw: pointer to the hardware structure
6078 * @vsi_handle: VSI handle to remove filters from
6080 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6082 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6086 * ice_alloc_res_cntr - allocating resource counter
6087 * @hw: pointer to the hardware structure
6088 * @type: type of resource
6089 * @alloc_shared: if set it is shared else dedicated
6090 * @num_items: number of entries requested for FD resource type
6091 * @counter_id: counter index returned by AQ call
6094 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6097 struct ice_aqc_alloc_free_res_elem *buf;
6098 enum ice_status status;
6101 /* Allocate resource */
6102 buf_len = ice_struct_size(buf, elem, 1);
6103 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6105 return ICE_ERR_NO_MEMORY;
6107 buf->num_elems = CPU_TO_LE16(num_items);
6108 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6109 ICE_AQC_RES_TYPE_M) | alloc_shared);
6111 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6112 ice_aqc_opc_alloc_res, NULL);
6116 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6124 * ice_free_res_cntr - free resource counter
6125 * @hw: pointer to the hardware structure
6126 * @type: type of resource
6127 * @alloc_shared: if set it is shared else dedicated
6128 * @num_items: number of entries to be freed for FD resource type
6129 * @counter_id: counter ID resource which needs to be freed
6132 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6135 struct ice_aqc_alloc_free_res_elem *buf;
6136 enum ice_status status;
6140 buf_len = ice_struct_size(buf, elem, 1);
6141 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6143 return ICE_ERR_NO_MEMORY;
6145 buf->num_elems = CPU_TO_LE16(num_items);
6146 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6147 ICE_AQC_RES_TYPE_M) | alloc_shared);
6148 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6150 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6151 ice_aqc_opc_free_res, NULL);
6153 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6160 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6161 * @hw: pointer to the hardware structure
6162 * @counter_id: returns counter index
6164 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6166 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6167 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6172 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6173 * @hw: pointer to the hardware structure
6174 * @counter_id: counter index to be freed
6176 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6178 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6179 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6184 * ice_alloc_res_lg_act - add large action resource
6185 * @hw: pointer to the hardware structure
6186 * @l_id: large action ID to fill it in
6187 * @num_acts: number of actions to hold with a large action entry
6189 static enum ice_status
6190 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6192 struct ice_aqc_alloc_free_res_elem *sw_buf;
6193 enum ice_status status;
6196 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6197 return ICE_ERR_PARAM;
6199 /* Allocate resource for large action */
6200 buf_len = ice_struct_size(sw_buf, elem, 1);
6201 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6203 return ICE_ERR_NO_MEMORY;
6205 sw_buf->num_elems = CPU_TO_LE16(1);
6207 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6208 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6209 * If num_acts is greater than 2, then use
6210 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6211 * The num_acts cannot exceed 4. This was ensured at the
6212 * beginning of the function.
6215 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6216 else if (num_acts == 2)
6217 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6219 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6221 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6222 ice_aqc_opc_alloc_res, NULL);
6224 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6226 ice_free(hw, sw_buf);
6231 * ice_add_mac_with_sw_marker - add filter with sw marker
6232 * @hw: pointer to the hardware structure
6233 * @f_info: filter info structure containing the MAC filter information
6234 * @sw_marker: sw marker to tag the Rx descriptor with
6237 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6240 struct ice_fltr_mgmt_list_entry *m_entry;
6241 struct ice_fltr_list_entry fl_info;
6242 struct ice_sw_recipe *recp_list;
6243 struct LIST_HEAD_TYPE l_head;
6244 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6245 enum ice_status ret;
6249 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6250 return ICE_ERR_PARAM;
6252 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6253 return ICE_ERR_PARAM;
6255 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6256 return ICE_ERR_PARAM;
6258 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6259 return ICE_ERR_PARAM;
6260 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6262 /* Add filter if it doesn't exist so then the adding of large
6263 * action always results in update
6266 INIT_LIST_HEAD(&l_head);
6267 fl_info.fltr_info = *f_info;
6268 LIST_ADD(&fl_info.list_entry, &l_head);
6270 entry_exists = false;
6271 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6272 hw->port_info->lport);
6273 if (ret == ICE_ERR_ALREADY_EXISTS)
6274 entry_exists = true;
6278 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6279 rule_lock = &recp_list->filt_rule_lock;
6280 ice_acquire_lock(rule_lock);
6281 /* Get the book keeping entry for the filter */
6282 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6286 /* If counter action was enabled for this rule then don't enable
6287 * sw marker large action
6289 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6290 ret = ICE_ERR_PARAM;
6294 /* if same marker was added before */
6295 if (m_entry->sw_marker_id == sw_marker) {
6296 ret = ICE_ERR_ALREADY_EXISTS;
6300 /* Allocate a hardware table entry to hold large act. Three actions
6301 * for marker based large action
6303 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6307 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6310 /* Update the switch rule to add the marker action */
6311 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6313 ice_release_lock(rule_lock);
6318 ice_release_lock(rule_lock);
6319 /* only remove entry if it did not exist previously */
6321 ret = ice_remove_mac(hw, &l_head);
6327 * ice_add_mac_with_counter - add filter with counter enabled
6328 * @hw: pointer to the hardware structure
6329 * @f_info: pointer to filter info structure containing the MAC filter
6333 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6335 struct ice_fltr_mgmt_list_entry *m_entry;
6336 struct ice_fltr_list_entry fl_info;
6337 struct ice_sw_recipe *recp_list;
6338 struct LIST_HEAD_TYPE l_head;
6339 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6340 enum ice_status ret;
6345 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6346 return ICE_ERR_PARAM;
6348 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6349 return ICE_ERR_PARAM;
6351 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6352 return ICE_ERR_PARAM;
6353 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6354 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6356 entry_exist = false;
6358 rule_lock = &recp_list->filt_rule_lock;
6360 /* Add filter if it doesn't exist so then the adding of large
6361 * action always results in update
6363 INIT_LIST_HEAD(&l_head);
6365 fl_info.fltr_info = *f_info;
6366 LIST_ADD(&fl_info.list_entry, &l_head);
6368 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6369 hw->port_info->lport);
6370 if (ret == ICE_ERR_ALREADY_EXISTS)
6375 ice_acquire_lock(rule_lock);
6376 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6378 ret = ICE_ERR_BAD_PTR;
6382 /* Don't enable counter for a filter for which sw marker was enabled */
6383 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6384 ret = ICE_ERR_PARAM;
6388 /* If a counter was already enabled then don't need to add again */
6389 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6390 ret = ICE_ERR_ALREADY_EXISTS;
6394 /* Allocate a hardware table entry to VLAN counter */
6395 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6399 /* Allocate a hardware table entry to hold large act. Two actions for
6400 * counter based large action
6402 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6406 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6409 /* Update the switch rule to add the counter action */
6410 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6412 ice_release_lock(rule_lock);
6417 ice_release_lock(rule_lock);
6418 /* only remove entry if it did not exist previously */
6420 ret = ice_remove_mac(hw, &l_head);
6425 /* This is mapping table entry that maps every word within a given protocol
6426 * structure to the real byte offset as per the specification of that
6428 * for example dst address is 3 words in ethertype header and corresponding
6429 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6430 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6431 * matching entry describing its field. This needs to be updated if new
6432 * structure is added to that union.
6434 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6435 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6436 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6437 { ICE_ETYPE_OL, { 0 } },
6438 { ICE_VLAN_OFOS, { 0, 2 } },
6439 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6440 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6441 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6442 26, 28, 30, 32, 34, 36, 38 } },
6443 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6444 26, 28, 30, 32, 34, 36, 38 } },
6445 { ICE_TCP_IL, { 0, 2 } },
6446 { ICE_UDP_OF, { 0, 2 } },
6447 { ICE_UDP_ILOS, { 0, 2 } },
6448 { ICE_SCTP_IL, { 0, 2 } },
6449 { ICE_VXLAN, { 8, 10, 12, 14 } },
6450 { ICE_GENEVE, { 8, 10, 12, 14 } },
6451 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6452 { ICE_NVGRE, { 0, 2, 4, 6 } },
6453 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6454 { ICE_PPPOE, { 0, 2, 4, 6 } },
6455 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6456 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6457 { ICE_ESP, { 0, 2, 4, 6 } },
6458 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6459 { ICE_NAT_T, { 8, 10, 12, 14 } },
6460 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6461 { ICE_VLAN_EX, { 0, 2 } },
6464 /* The following table describes preferred grouping of recipes.
6465 * If a recipe that needs to be programmed is a superset or matches one of the
6466 * following combinations, then the recipe needs to be chained as per the
6470 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6471 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6472 { ICE_MAC_IL, ICE_MAC_IL_HW },
6473 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6474 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6475 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6476 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6477 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6478 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6479 { ICE_TCP_IL, ICE_TCP_IL_HW },
6480 { ICE_UDP_OF, ICE_UDP_OF_HW },
6481 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6482 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6483 { ICE_VXLAN, ICE_UDP_OF_HW },
6484 { ICE_GENEVE, ICE_UDP_OF_HW },
6485 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6486 { ICE_NVGRE, ICE_GRE_OF_HW },
6487 { ICE_GTP, ICE_UDP_OF_HW },
6488 { ICE_PPPOE, ICE_PPPOE_HW },
6489 { ICE_PFCP, ICE_UDP_ILOS_HW },
6490 { ICE_L2TPV3, ICE_L2TPV3_HW },
6491 { ICE_ESP, ICE_ESP_HW },
6492 { ICE_AH, ICE_AH_HW },
6493 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6494 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6495 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6499 * ice_find_recp - find a recipe
6500 * @hw: pointer to the hardware structure
6501 * @lkup_exts: extension sequence to match
6503 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6505 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6506 enum ice_sw_tunnel_type tun_type)
6508 bool refresh_required = true;
6509 struct ice_sw_recipe *recp;
6512 /* Walk through existing recipes to find a match */
6513 recp = hw->switch_info->recp_list;
6514 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6515 /* If recipe was not created for this ID, in SW bookkeeping,
6516 * check if FW has an entry for this recipe. If the FW has an
6517 * entry update it in our SW bookkeeping and continue with the
6520 if (!recp[i].recp_created)
6521 if (ice_get_recp_frm_fw(hw,
6522 hw->switch_info->recp_list, i,
6526 /* Skip inverse action recipes */
6527 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6528 ICE_AQ_RECIPE_ACT_INV_ACT)
6531 /* if number of words we are looking for match */
6532 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6533 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6534 struct ice_fv_word *be = lkup_exts->fv_words;
6535 u16 *cr = recp[i].lkup_exts.field_mask;
6536 u16 *de = lkup_exts->field_mask;
6540 /* ar, cr, and qr are related to the recipe words, while
6541 * be, de, and pe are related to the lookup words
6543 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6544 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6546 if (ar[qr].off == be[pe].off &&
6547 ar[qr].prot_id == be[pe].prot_id &&
6549 /* Found the "pe"th word in the
6554 /* After walking through all the words in the
6555 * "i"th recipe if "p"th word was not found then
6556 * this recipe is not what we are looking for.
6557 * So break out from this loop and try the next
6560 if (qr >= recp[i].lkup_exts.n_val_words) {
6565 /* If for "i"th recipe the found was never set to false
6566 * then it means we found our match
6568 if (tun_type == recp[i].tun_type && found)
6569 return i; /* Return the recipe ID */
6572 return ICE_MAX_NUM_RECIPES;
6576 * ice_prot_type_to_id - get protocol ID from protocol type
6577 * @type: protocol type
6578 * @id: pointer to variable that will receive the ID
6580 * Returns true if found, false otherwise
6582 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6586 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6587 if (ice_prot_id_tbl[i].type == type) {
6588 *id = ice_prot_id_tbl[i].protocol_id;
6595 * ice_find_valid_words - count valid words
6596 * @rule: advanced rule with lookup information
6597 * @lkup_exts: byte offset extractions of the words that are valid
6599 * calculate valid words in a lookup rule using mask value
6602 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6603 struct ice_prot_lkup_ext *lkup_exts)
6605 u8 j, word, prot_id, ret_val;
6607 if (!ice_prot_type_to_id(rule->type, &prot_id))
6610 word = lkup_exts->n_val_words;
6612 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6613 if (((u16 *)&rule->m_u)[j] &&
6614 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6615 /* No more space to accommodate */
6616 if (word >= ICE_MAX_CHAIN_WORDS)
6618 lkup_exts->fv_words[word].off =
6619 ice_prot_ext[rule->type].offs[j];
6620 lkup_exts->fv_words[word].prot_id =
6621 ice_prot_id_tbl[rule->type].protocol_id;
6622 lkup_exts->field_mask[word] =
6623 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6627 ret_val = word - lkup_exts->n_val_words;
6628 lkup_exts->n_val_words = word;
6634 * ice_create_first_fit_recp_def - Create a recipe grouping
6635 * @hw: pointer to the hardware structure
6636 * @lkup_exts: an array of protocol header extractions
6637 * @rg_list: pointer to a list that stores new recipe groups
6638 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6640 * Using first fit algorithm, take all the words that are still not done
6641 * and start grouping them in 4-word groups. Each group makes up one
6644 static enum ice_status
6645 ice_create_first_fit_recp_def(struct ice_hw *hw,
6646 struct ice_prot_lkup_ext *lkup_exts,
6647 struct LIST_HEAD_TYPE *rg_list,
6650 struct ice_pref_recipe_group *grp = NULL;
6655 if (!lkup_exts->n_val_words) {
6656 struct ice_recp_grp_entry *entry;
6658 entry = (struct ice_recp_grp_entry *)
6659 ice_malloc(hw, sizeof(*entry));
6661 return ICE_ERR_NO_MEMORY;
6662 LIST_ADD(&entry->l_entry, rg_list);
6663 grp = &entry->r_group;
6665 grp->n_val_pairs = 0;
6668 /* Walk through every word in the rule to check if it is not done. If so
6669 * then this word needs to be part of a new recipe.
6671 for (j = 0; j < lkup_exts->n_val_words; j++)
6672 if (!ice_is_bit_set(lkup_exts->done, j)) {
6674 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6675 struct ice_recp_grp_entry *entry;
6677 entry = (struct ice_recp_grp_entry *)
6678 ice_malloc(hw, sizeof(*entry));
6680 return ICE_ERR_NO_MEMORY;
6681 LIST_ADD(&entry->l_entry, rg_list);
6682 grp = &entry->r_group;
6686 grp->pairs[grp->n_val_pairs].prot_id =
6687 lkup_exts->fv_words[j].prot_id;
6688 grp->pairs[grp->n_val_pairs].off =
6689 lkup_exts->fv_words[j].off;
6690 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6698 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6699 * @hw: pointer to the hardware structure
6700 * @fv_list: field vector with the extraction sequence information
6701 * @rg_list: recipe groupings with protocol-offset pairs
6703 * Helper function to fill in the field vector indices for protocol-offset
6704 * pairs. These indexes are then ultimately programmed into a recipe.
6706 static enum ice_status
6707 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6708 struct LIST_HEAD_TYPE *rg_list)
6710 struct ice_sw_fv_list_entry *fv;
6711 struct ice_recp_grp_entry *rg;
6712 struct ice_fv_word *fv_ext;
6714 if (LIST_EMPTY(fv_list))
6717 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6718 fv_ext = fv->fv_ptr->ew;
6720 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6723 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6724 struct ice_fv_word *pr;
6729 pr = &rg->r_group.pairs[i];
6730 mask = rg->r_group.mask[i];
6732 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6733 if (fv_ext[j].prot_id == pr->prot_id &&
6734 fv_ext[j].off == pr->off) {
6737 /* Store index of field vector */
6739 rg->fv_mask[i] = mask;
6743 /* Protocol/offset could not be found, caller gave an
6747 return ICE_ERR_PARAM;
6755 * ice_find_free_recp_res_idx - find free result indexes for recipe
6756 * @hw: pointer to hardware structure
6757 * @profiles: bitmap of profiles that will be associated with the new recipe
6758 * @free_idx: pointer to variable to receive the free index bitmap
6760 * The algorithm used here is:
6761 * 1. When creating a new recipe, create a set P which contains all
6762 * Profiles that will be associated with our new recipe
6764 * 2. For each Profile p in set P:
6765 * a. Add all recipes associated with Profile p into set R
6766 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6767 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6768 * i. Or just assume they all have the same possible indexes:
6770 * i.e., PossibleIndexes = 0x0000F00000000000
6772 * 3. For each Recipe r in set R:
6773 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6774 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6776 * FreeIndexes will contain the bits indicating the indexes free for use,
6777 * then the code needs to update the recipe[r].used_result_idx_bits to
6778 * indicate which indexes were selected for use by this recipe.
6781 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6782 ice_bitmap_t *free_idx)
6784 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6785 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6786 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6789 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6790 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6791 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6792 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6794 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6796 /* For each profile we are going to associate the recipe with, add the
6797 * recipes that are associated with that profile. This will give us
6798 * the set of recipes that our recipe may collide with. Also, determine
6799 * what possible result indexes are usable given this set of profiles.
6801 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6802 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6803 ICE_MAX_NUM_RECIPES);
6804 ice_and_bitmap(possible_idx, possible_idx,
6805 hw->switch_info->prof_res_bm[bit],
6809 /* For each recipe that our new recipe may collide with, determine
6810 * which indexes have been used.
6812 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6813 ice_or_bitmap(used_idx, used_idx,
6814 hw->switch_info->recp_list[bit].res_idxs,
6817 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6819 /* return number of free indexes */
6820 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6824 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6825 * @hw: pointer to hardware structure
6826 * @rm: recipe management list entry
6827 * @profiles: bitmap of profiles that will be associated.
6829 static enum ice_status
6830 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6831 ice_bitmap_t *profiles)
6833 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6834 struct ice_aqc_recipe_data_elem *tmp;
6835 struct ice_aqc_recipe_data_elem *buf;
6836 struct ice_recp_grp_entry *entry;
6837 enum ice_status status;
6843 /* When more than one recipe are required, another recipe is needed to
6844 * chain them together. Matching a tunnel metadata ID takes up one of
6845 * the match fields in the chaining recipe reducing the number of
6846 * chained recipes by one.
6848 /* check number of free result indices */
6849 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6850 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6852 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6853 free_res_idx, rm->n_grp_count);
6855 if (rm->n_grp_count > 1) {
6856 if (rm->n_grp_count > free_res_idx)
6857 return ICE_ERR_MAX_LIMIT;
6862 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6863 return ICE_ERR_MAX_LIMIT;
6865 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6866 ICE_MAX_NUM_RECIPES,
6869 return ICE_ERR_NO_MEMORY;
6871 buf = (struct ice_aqc_recipe_data_elem *)
6872 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6874 status = ICE_ERR_NO_MEMORY;
6878 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6879 recipe_count = ICE_MAX_NUM_RECIPES;
6880 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6882 if (status || recipe_count == 0)
6885 /* Allocate the recipe resources, and configure them according to the
6886 * match fields from protocol headers and extracted field vectors.
6888 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6889 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6892 status = ice_alloc_recipe(hw, &entry->rid);
6896 /* Clear the result index of the located recipe, as this will be
6897 * updated, if needed, later in the recipe creation process.
6899 tmp[0].content.result_indx = 0;
6901 buf[recps] = tmp[0];
6902 buf[recps].recipe_indx = (u8)entry->rid;
6903 /* if the recipe is a non-root recipe RID should be programmed
6904 * as 0 for the rules to be applied correctly.
6906 buf[recps].content.rid = 0;
6907 ice_memset(&buf[recps].content.lkup_indx, 0,
6908 sizeof(buf[recps].content.lkup_indx),
6911 /* All recipes use look-up index 0 to match switch ID. */
6912 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6913 buf[recps].content.mask[0] =
6914 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6915 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6918 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6919 buf[recps].content.lkup_indx[i] = 0x80;
6920 buf[recps].content.mask[i] = 0;
6923 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6924 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6925 buf[recps].content.mask[i + 1] =
6926 CPU_TO_LE16(entry->fv_mask[i]);
6929 if (rm->n_grp_count > 1) {
6930 /* Checks to see if there really is a valid result index
6933 if (chain_idx >= ICE_MAX_FV_WORDS) {
6934 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6935 status = ICE_ERR_MAX_LIMIT;
6939 entry->chain_idx = chain_idx;
6940 buf[recps].content.result_indx =
6941 ICE_AQ_RECIPE_RESULT_EN |
6942 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6943 ICE_AQ_RECIPE_RESULT_DATA_M);
6944 ice_clear_bit(chain_idx, result_idx_bm);
6945 chain_idx = ice_find_first_bit(result_idx_bm,
6949 /* fill recipe dependencies */
6950 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6951 ICE_MAX_NUM_RECIPES);
6952 ice_set_bit(buf[recps].recipe_indx,
6953 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6954 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6958 if (rm->n_grp_count == 1) {
6959 rm->root_rid = buf[0].recipe_indx;
6960 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6961 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6962 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6963 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6964 sizeof(buf[0].recipe_bitmap),
6965 ICE_NONDMA_TO_NONDMA);
6967 status = ICE_ERR_BAD_PTR;
6970 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6971 * the recipe which is getting created if specified
6972 * by user. Usually any advanced switch filter, which results
6973 * into new extraction sequence, ended up creating a new recipe
6974 * of type ROOT and usually recipes are associated with profiles
6975 * Switch rule referreing newly created recipe, needs to have
6976 * either/or 'fwd' or 'join' priority, otherwise switch rule
6977 * evaluation will not happen correctly. In other words, if
6978 * switch rule to be evaluated on priority basis, then recipe
6979 * needs to have priority, otherwise it will be evaluated last.
6981 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6983 struct ice_recp_grp_entry *last_chain_entry;
6986 /* Allocate the last recipe that will chain the outcomes of the
6987 * other recipes together
6989 status = ice_alloc_recipe(hw, &rid);
6993 buf[recps].recipe_indx = (u8)rid;
6994 buf[recps].content.rid = (u8)rid;
6995 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6996 /* the new entry created should also be part of rg_list to
6997 * make sure we have complete recipe
6999 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7000 sizeof(*last_chain_entry));
7001 if (!last_chain_entry) {
7002 status = ICE_ERR_NO_MEMORY;
7005 last_chain_entry->rid = rid;
7006 ice_memset(&buf[recps].content.lkup_indx, 0,
7007 sizeof(buf[recps].content.lkup_indx),
7009 /* All recipes use look-up index 0 to match switch ID. */
7010 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7011 buf[recps].content.mask[0] =
7012 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7013 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7014 buf[recps].content.lkup_indx[i] =
7015 ICE_AQ_RECIPE_LKUP_IGNORE;
7016 buf[recps].content.mask[i] = 0;
7020 /* update r_bitmap with the recp that is used for chaining */
7021 ice_set_bit(rid, rm->r_bitmap);
7022 /* this is the recipe that chains all the other recipes so it
7023 * should not have a chaining ID to indicate the same
7025 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7026 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7028 last_chain_entry->fv_idx[i] = entry->chain_idx;
7029 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7030 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7031 ice_set_bit(entry->rid, rm->r_bitmap);
7033 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7034 if (sizeof(buf[recps].recipe_bitmap) >=
7035 sizeof(rm->r_bitmap)) {
7036 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7037 sizeof(buf[recps].recipe_bitmap),
7038 ICE_NONDMA_TO_NONDMA);
7040 status = ICE_ERR_BAD_PTR;
7043 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7046 rm->root_rid = (u8)rid;
7048 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7052 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7053 ice_release_change_lock(hw);
7057 /* Every recipe that just got created add it to the recipe
7060 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7061 struct ice_switch_info *sw = hw->switch_info;
7062 bool is_root, idx_found = false;
7063 struct ice_sw_recipe *recp;
7064 u16 idx, buf_idx = 0;
7066 /* find buffer index for copying some data */
7067 for (idx = 0; idx < rm->n_grp_count; idx++)
7068 if (buf[idx].recipe_indx == entry->rid) {
7074 status = ICE_ERR_OUT_OF_RANGE;
7078 recp = &sw->recp_list[entry->rid];
7079 is_root = (rm->root_rid == entry->rid);
7080 recp->is_root = is_root;
7082 recp->root_rid = entry->rid;
7083 recp->big_recp = (is_root && rm->n_grp_count > 1);
7085 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7086 entry->r_group.n_val_pairs *
7087 sizeof(struct ice_fv_word),
7088 ICE_NONDMA_TO_NONDMA);
7090 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7091 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7093 /* Copy non-result fv index values and masks to recipe. This
7094 * call will also update the result recipe bitmask.
7096 ice_collect_result_idx(&buf[buf_idx], recp);
7098 /* for non-root recipes, also copy to the root, this allows
7099 * easier matching of a complete chained recipe
7102 ice_collect_result_idx(&buf[buf_idx],
7103 &sw->recp_list[rm->root_rid]);
7105 recp->n_ext_words = entry->r_group.n_val_pairs;
7106 recp->chain_idx = entry->chain_idx;
7107 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7108 recp->n_grp_count = rm->n_grp_count;
7109 recp->tun_type = rm->tun_type;
7110 recp->recp_created = true;
7124 * ice_create_recipe_group - creates recipe group
7125 * @hw: pointer to hardware structure
7126 * @rm: recipe management list entry
7127 * @lkup_exts: lookup elements
7129 static enum ice_status
7130 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7131 struct ice_prot_lkup_ext *lkup_exts)
7133 enum ice_status status;
7136 rm->n_grp_count = 0;
7138 /* Create recipes for words that are marked not done by packing them
7141 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7142 &rm->rg_list, &recp_count);
7144 rm->n_grp_count += recp_count;
7145 rm->n_ext_words = lkup_exts->n_val_words;
7146 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7147 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7148 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7149 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7156 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7157 * @hw: pointer to hardware structure
7158 * @lkups: lookup elements or match criteria for the advanced recipe, one
7159 * structure per protocol header
7160 * @lkups_cnt: number of protocols
7161 * @bm: bitmap of field vectors to consider
7162 * @fv_list: pointer to a list that holds the returned field vectors
7164 static enum ice_status
7165 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7166 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7168 enum ice_status status;
7175 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7177 return ICE_ERR_NO_MEMORY;
7179 for (i = 0; i < lkups_cnt; i++)
7180 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7181 status = ICE_ERR_CFG;
7185 /* Find field vectors that include all specified protocol types */
7186 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7189 ice_free(hw, prot_ids);
7194 * ice_tun_type_match_mask - determine if tun type needs a match mask
7195 * @tun_type: tunnel type
7196 * @mask: mask to be used for the tunnel
7198 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7201 case ICE_SW_TUN_VXLAN_GPE:
7202 case ICE_SW_TUN_GENEVE:
7203 case ICE_SW_TUN_VXLAN:
7204 case ICE_SW_TUN_NVGRE:
7205 case ICE_SW_TUN_UDP:
7206 case ICE_ALL_TUNNELS:
7207 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7208 case ICE_NON_TUN_QINQ:
7209 case ICE_SW_TUN_PPPOE_QINQ:
7210 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7211 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7212 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7213 *mask = ICE_TUN_FLAG_MASK;
7216 case ICE_SW_TUN_GENEVE_VLAN:
7217 case ICE_SW_TUN_VXLAN_VLAN:
7218 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7228 * ice_add_special_words - Add words that are not protocols, such as metadata
7229 * @rinfo: other information regarding the rule e.g. priority and action info
7230 * @lkup_exts: lookup word structure
7232 static enum ice_status
7233 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7234 struct ice_prot_lkup_ext *lkup_exts)
7238 /* If this is a tunneled packet, then add recipe index to match the
7239 * tunnel bit in the packet metadata flags.
7241 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7242 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7243 u8 word = lkup_exts->n_val_words++;
7245 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7246 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7247 lkup_exts->field_mask[word] = mask;
7249 return ICE_ERR_MAX_LIMIT;
7256 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7257 * @hw: pointer to hardware structure
7258 * @rinfo: other information regarding the rule e.g. priority and action info
7259 * @bm: pointer to memory for returning the bitmap of field vectors
7262 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7265 enum ice_prof_type prof_type;
7267 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7269 switch (rinfo->tun_type) {
7271 case ICE_NON_TUN_QINQ:
7272 prof_type = ICE_PROF_NON_TUN;
7274 case ICE_ALL_TUNNELS:
7275 prof_type = ICE_PROF_TUN_ALL;
7277 case ICE_SW_TUN_VXLAN_GPE:
7278 case ICE_SW_TUN_GENEVE:
7279 case ICE_SW_TUN_GENEVE_VLAN:
7280 case ICE_SW_TUN_VXLAN:
7281 case ICE_SW_TUN_VXLAN_VLAN:
7282 case ICE_SW_TUN_UDP:
7283 case ICE_SW_TUN_GTP:
7284 prof_type = ICE_PROF_TUN_UDP;
7286 case ICE_SW_TUN_NVGRE:
7287 prof_type = ICE_PROF_TUN_GRE;
7289 case ICE_SW_TUN_PPPOE:
7290 case ICE_SW_TUN_PPPOE_QINQ:
7291 prof_type = ICE_PROF_TUN_PPPOE;
7293 case ICE_SW_TUN_PPPOE_PAY:
7294 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7295 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7297 case ICE_SW_TUN_PPPOE_IPV4:
7298 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7299 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7300 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7301 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7303 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7304 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7306 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7307 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7309 case ICE_SW_TUN_PPPOE_IPV6:
7310 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7311 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7312 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7313 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7315 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7316 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7318 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7319 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7321 case ICE_SW_TUN_PROFID_IPV6_ESP:
7322 case ICE_SW_TUN_IPV6_ESP:
7323 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7325 case ICE_SW_TUN_PROFID_IPV6_AH:
7326 case ICE_SW_TUN_IPV6_AH:
7327 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7329 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7330 case ICE_SW_TUN_IPV6_L2TPV3:
7331 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7333 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7334 case ICE_SW_TUN_IPV6_NAT_T:
7335 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7337 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7338 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7340 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7341 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7343 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7344 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7346 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7347 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7349 case ICE_SW_TUN_IPV4_NAT_T:
7350 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7352 case ICE_SW_TUN_IPV4_L2TPV3:
7353 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7355 case ICE_SW_TUN_IPV4_ESP:
7356 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7358 case ICE_SW_TUN_IPV4_AH:
7359 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7361 case ICE_SW_IPV4_TCP:
7362 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7364 case ICE_SW_IPV4_UDP:
7365 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7367 case ICE_SW_IPV6_TCP:
7368 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7370 case ICE_SW_IPV6_UDP:
7371 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7373 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7374 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7375 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7376 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7377 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7378 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7379 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7381 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7382 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7383 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7384 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7385 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7386 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7387 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7389 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7390 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7391 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7392 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7393 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7394 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7395 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7397 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7398 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7399 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7400 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7401 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7402 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7403 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7405 case ICE_SW_TUN_AND_NON_TUN:
7406 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7408 prof_type = ICE_PROF_ALL;
7412 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7416 * ice_is_prof_rule - determine if rule type is a profile rule
7417 * @type: the rule type
7419 * if the rule type is a profile rule, that means that there no field value
7420 * match required, in this case just a profile hit is required.
7422 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7425 case ICE_SW_TUN_PROFID_IPV6_ESP:
7426 case ICE_SW_TUN_PROFID_IPV6_AH:
7427 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7428 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7429 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7430 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7431 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7432 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7442 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7443 * @hw: pointer to hardware structure
7444 * @lkups: lookup elements or match criteria for the advanced recipe, one
7445 * structure per protocol header
7446 * @lkups_cnt: number of protocols
7447 * @rinfo: other information regarding the rule e.g. priority and action info
7448 * @rid: return the recipe ID of the recipe created
7450 static enum ice_status
7451 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7452 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7454 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7455 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7456 struct ice_prot_lkup_ext *lkup_exts;
7457 struct ice_recp_grp_entry *r_entry;
7458 struct ice_sw_fv_list_entry *fvit;
7459 struct ice_recp_grp_entry *r_tmp;
7460 struct ice_sw_fv_list_entry *tmp;
7461 enum ice_status status = ICE_SUCCESS;
7462 struct ice_sw_recipe *rm;
7465 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7466 return ICE_ERR_PARAM;
7468 lkup_exts = (struct ice_prot_lkup_ext *)
7469 ice_malloc(hw, sizeof(*lkup_exts));
7471 return ICE_ERR_NO_MEMORY;
7473 /* Determine the number of words to be matched and if it exceeds a
7474 * recipe's restrictions
7476 for (i = 0; i < lkups_cnt; i++) {
7479 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7480 status = ICE_ERR_CFG;
7481 goto err_free_lkup_exts;
7484 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7486 status = ICE_ERR_CFG;
7487 goto err_free_lkup_exts;
7491 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7493 status = ICE_ERR_NO_MEMORY;
7494 goto err_free_lkup_exts;
7497 /* Get field vectors that contain fields extracted from all the protocol
7498 * headers being programmed.
7500 INIT_LIST_HEAD(&rm->fv_list);
7501 INIT_LIST_HEAD(&rm->rg_list);
7503 /* Get bitmap of field vectors (profiles) that are compatible with the
7504 * rule request; only these will be searched in the subsequent call to
7507 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7509 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7513 /* Create any special protocol/offset pairs, such as looking at tunnel
7514 * bits by extracting metadata
7516 status = ice_add_special_words(rinfo, lkup_exts);
7518 goto err_free_lkup_exts;
7520 /* Group match words into recipes using preferred recipe grouping
7523 status = ice_create_recipe_group(hw, rm, lkup_exts);
7527 /* set the recipe priority if specified */
7528 rm->priority = (u8)rinfo->priority;
7530 /* Find offsets from the field vector. Pick the first one for all the
7533 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7537 /* An empty FV list means to use all the profiles returned in the
7540 if (LIST_EMPTY(&rm->fv_list)) {
7543 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7544 struct ice_sw_fv_list_entry *fvl;
7546 fvl = (struct ice_sw_fv_list_entry *)
7547 ice_malloc(hw, sizeof(*fvl));
7551 fvl->profile_id = j;
7552 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7556 /* get bitmap of all profiles the recipe will be associated with */
7557 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7558 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7560 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7561 ice_set_bit((u16)fvit->profile_id, profiles);
7564 /* Look for a recipe which matches our requested fv / mask list */
7565 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7566 if (*rid < ICE_MAX_NUM_RECIPES)
7567 /* Success if found a recipe that match the existing criteria */
7570 rm->tun_type = rinfo->tun_type;
7571 /* Recipe we need does not exist, add a recipe */
7572 status = ice_add_sw_recipe(hw, rm, profiles);
7576 /* Associate all the recipes created with all the profiles in the
7577 * common field vector.
7579 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7581 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7584 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7585 (u8 *)r_bitmap, NULL);
7589 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7590 ICE_MAX_NUM_RECIPES);
7591 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7595 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7598 ice_release_change_lock(hw);
7603 /* Update profile to recipe bitmap array */
7604 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7605 ICE_MAX_NUM_RECIPES);
7607 /* Update recipe to profile bitmap array */
7608 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7609 ice_set_bit((u16)fvit->profile_id,
7610 recipe_to_profile[j]);
7613 *rid = rm->root_rid;
7614 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7615 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7617 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7618 ice_recp_grp_entry, l_entry) {
7619 LIST_DEL(&r_entry->l_entry);
7620 ice_free(hw, r_entry);
7623 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7625 LIST_DEL(&fvit->list_entry);
7630 ice_free(hw, rm->root_buf);
7635 ice_free(hw, lkup_exts);
7641 * ice_find_dummy_packet - find dummy packet by tunnel type
7643 * @lkups: lookup elements or match criteria for the advanced recipe, one
7644 * structure per protocol header
7645 * @lkups_cnt: number of protocols
7646 * @tun_type: tunnel type from the match criteria
7647 * @pkt: dummy packet to fill according to filter match criteria
7648 * @pkt_len: packet length of dummy packet
7649 * @offsets: pointer to receive the pointer to the offsets for the packet
7652 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7653 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7655 const struct ice_dummy_pkt_offsets **offsets)
7657 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7661 for (i = 0; i < lkups_cnt; i++) {
7662 if (lkups[i].type == ICE_UDP_ILOS)
7664 else if (lkups[i].type == ICE_TCP_IL)
7666 else if (lkups[i].type == ICE_IPV6_OFOS)
7668 else if (lkups[i].type == ICE_VLAN_OFOS)
7670 else if (lkups[i].type == ICE_IPV4_OFOS &&
7671 lkups[i].h_u.ipv4_hdr.protocol ==
7672 ICE_IPV4_NVGRE_PROTO_ID &&
7673 lkups[i].m_u.ipv4_hdr.protocol ==
7676 else if (lkups[i].type == ICE_PPPOE &&
7677 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7678 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7679 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7682 else if (lkups[i].type == ICE_ETYPE_OL &&
7683 lkups[i].h_u.ethertype.ethtype_id ==
7684 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7685 lkups[i].m_u.ethertype.ethtype_id ==
7688 else if (lkups[i].type == ICE_IPV4_IL &&
7689 lkups[i].h_u.ipv4_hdr.protocol ==
7691 lkups[i].m_u.ipv4_hdr.protocol ==
7696 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7697 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7698 *pkt = dummy_qinq_ipv6_pkt;
7699 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7700 *offsets = dummy_qinq_ipv6_packet_offsets;
7702 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7703 tun_type == ICE_NON_TUN_QINQ) {
7704 *pkt = dummy_qinq_ipv4_pkt;
7705 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7706 *offsets = dummy_qinq_ipv4_packet_offsets;
7710 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7711 *pkt = dummy_qinq_pppoe_ipv6_packet;
7712 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7713 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7715 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7716 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7717 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7718 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7720 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7721 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7722 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7723 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7724 *offsets = dummy_qinq_pppoe_packet_offsets;
7728 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7729 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7730 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7731 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7733 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7734 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7735 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7736 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7738 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7739 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7740 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7741 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7743 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7744 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7745 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7746 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7748 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7749 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7750 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7751 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7753 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7754 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7755 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7756 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7760 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7761 *pkt = dummy_ipv4_esp_pkt;
7762 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7763 *offsets = dummy_ipv4_esp_packet_offsets;
7767 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7768 *pkt = dummy_ipv6_esp_pkt;
7769 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7770 *offsets = dummy_ipv6_esp_packet_offsets;
7774 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7775 *pkt = dummy_ipv4_ah_pkt;
7776 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7777 *offsets = dummy_ipv4_ah_packet_offsets;
7781 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7782 *pkt = dummy_ipv6_ah_pkt;
7783 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7784 *offsets = dummy_ipv6_ah_packet_offsets;
7788 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7789 *pkt = dummy_ipv4_nat_pkt;
7790 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7791 *offsets = dummy_ipv4_nat_packet_offsets;
7795 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7796 *pkt = dummy_ipv6_nat_pkt;
7797 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7798 *offsets = dummy_ipv6_nat_packet_offsets;
7802 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7803 *pkt = dummy_ipv4_l2tpv3_pkt;
7804 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7805 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7809 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7810 *pkt = dummy_ipv6_l2tpv3_pkt;
7811 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7812 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7816 if (tun_type == ICE_SW_TUN_GTP) {
7817 *pkt = dummy_udp_gtp_packet;
7818 *pkt_len = sizeof(dummy_udp_gtp_packet);
7819 *offsets = dummy_udp_gtp_packet_offsets;
7823 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7824 *pkt = dummy_pppoe_ipv6_packet;
7825 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7826 *offsets = dummy_pppoe_packet_offsets;
7828 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7829 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7830 *pkt = dummy_pppoe_ipv4_packet;
7831 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7832 *offsets = dummy_pppoe_packet_offsets;
7836 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7837 *pkt = dummy_pppoe_ipv4_packet;
7838 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7839 *offsets = dummy_pppoe_packet_ipv4_offsets;
7843 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7844 *pkt = dummy_pppoe_ipv4_tcp_packet;
7845 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7846 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7850 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7851 *pkt = dummy_pppoe_ipv4_udp_packet;
7852 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7853 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7857 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7858 *pkt = dummy_pppoe_ipv6_packet;
7859 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7860 *offsets = dummy_pppoe_packet_ipv6_offsets;
7864 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7865 *pkt = dummy_pppoe_ipv6_tcp_packet;
7866 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7867 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7871 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7872 *pkt = dummy_pppoe_ipv6_udp_packet;
7873 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7874 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7878 if (tun_type == ICE_SW_IPV4_TCP) {
7879 *pkt = dummy_tcp_packet;
7880 *pkt_len = sizeof(dummy_tcp_packet);
7881 *offsets = dummy_tcp_packet_offsets;
7885 if (tun_type == ICE_SW_IPV4_UDP) {
7886 *pkt = dummy_udp_packet;
7887 *pkt_len = sizeof(dummy_udp_packet);
7888 *offsets = dummy_udp_packet_offsets;
7892 if (tun_type == ICE_SW_IPV6_TCP) {
7893 *pkt = dummy_tcp_ipv6_packet;
7894 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7895 *offsets = dummy_tcp_ipv6_packet_offsets;
7899 if (tun_type == ICE_SW_IPV6_UDP) {
7900 *pkt = dummy_udp_ipv6_packet;
7901 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7902 *offsets = dummy_udp_ipv6_packet_offsets;
7906 if (tun_type == ICE_ALL_TUNNELS) {
7907 *pkt = dummy_gre_udp_packet;
7908 *pkt_len = sizeof(dummy_gre_udp_packet);
7909 *offsets = dummy_gre_udp_packet_offsets;
7913 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7915 *pkt = dummy_gre_tcp_packet;
7916 *pkt_len = sizeof(dummy_gre_tcp_packet);
7917 *offsets = dummy_gre_tcp_packet_offsets;
7921 *pkt = dummy_gre_udp_packet;
7922 *pkt_len = sizeof(dummy_gre_udp_packet);
7923 *offsets = dummy_gre_udp_packet_offsets;
7927 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7928 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7929 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7930 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7932 *pkt = dummy_udp_tun_tcp_packet;
7933 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7934 *offsets = dummy_udp_tun_tcp_packet_offsets;
7938 *pkt = dummy_udp_tun_udp_packet;
7939 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7940 *offsets = dummy_udp_tun_udp_packet_offsets;
7946 *pkt = dummy_vlan_udp_packet;
7947 *pkt_len = sizeof(dummy_vlan_udp_packet);
7948 *offsets = dummy_vlan_udp_packet_offsets;
7951 *pkt = dummy_udp_packet;
7952 *pkt_len = sizeof(dummy_udp_packet);
7953 *offsets = dummy_udp_packet_offsets;
7955 } else if (udp && ipv6) {
7957 *pkt = dummy_vlan_udp_ipv6_packet;
7958 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7959 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7962 *pkt = dummy_udp_ipv6_packet;
7963 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7964 *offsets = dummy_udp_ipv6_packet_offsets;
7966 } else if ((tcp && ipv6) || ipv6) {
7968 *pkt = dummy_vlan_tcp_ipv6_packet;
7969 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7970 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7973 *pkt = dummy_tcp_ipv6_packet;
7974 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7975 *offsets = dummy_tcp_ipv6_packet_offsets;
7980 *pkt = dummy_vlan_tcp_packet;
7981 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7982 *offsets = dummy_vlan_tcp_packet_offsets;
7984 *pkt = dummy_tcp_packet;
7985 *pkt_len = sizeof(dummy_tcp_packet);
7986 *offsets = dummy_tcp_packet_offsets;
7991 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7993 * @lkups: lookup elements or match criteria for the advanced recipe, one
7994 * structure per protocol header
7995 * @lkups_cnt: number of protocols
7996 * @s_rule: stores rule information from the match criteria
7997 * @dummy_pkt: dummy packet to fill according to filter match criteria
7998 * @pkt_len: packet length of dummy packet
7999 * @offsets: offset info for the dummy packet
8001 static enum ice_status
8002 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8003 struct ice_aqc_sw_rules_elem *s_rule,
8004 const u8 *dummy_pkt, u16 pkt_len,
8005 const struct ice_dummy_pkt_offsets *offsets)
8010 /* Start with a packet with a pre-defined/dummy content. Then, fill
8011 * in the header values to be looked up or matched.
8013 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8015 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8017 for (i = 0; i < lkups_cnt; i++) {
8018 enum ice_protocol_type type;
8019 u16 offset = 0, len = 0, j;
8022 /* find the start of this layer; it should be found since this
8023 * was already checked when search for the dummy packet
8025 type = lkups[i].type;
8026 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8027 if (type == offsets[j].type) {
8028 offset = offsets[j].offset;
8033 /* this should never happen in a correct calling sequence */
8035 return ICE_ERR_PARAM;
8037 switch (lkups[i].type) {
8040 len = sizeof(struct ice_ether_hdr);
8043 len = sizeof(struct ice_ethtype_hdr);
8047 len = sizeof(struct ice_vlan_hdr);
8051 len = sizeof(struct ice_ipv4_hdr);
8055 len = sizeof(struct ice_ipv6_hdr);
8060 len = sizeof(struct ice_l4_hdr);
8063 len = sizeof(struct ice_sctp_hdr);
8066 len = sizeof(struct ice_nvgre);
8071 len = sizeof(struct ice_udp_tnl_hdr);
8075 case ICE_GTP_NO_PAY:
8076 len = sizeof(struct ice_udp_gtp_hdr);
8079 len = sizeof(struct ice_pppoe_hdr);
8082 len = sizeof(struct ice_esp_hdr);
8085 len = sizeof(struct ice_nat_t_hdr);
8088 len = sizeof(struct ice_ah_hdr);
8091 len = sizeof(struct ice_l2tpv3_sess_hdr);
8094 return ICE_ERR_PARAM;
8097 /* the length should be a word multiple */
8098 if (len % ICE_BYTES_PER_WORD)
8101 /* We have the offset to the header start, the length, the
8102 * caller's header values and mask. Use this information to
8103 * copy the data into the dummy packet appropriately based on
8104 * the mask. Note that we need to only write the bits as
8105 * indicated by the mask to make sure we don't improperly write
8106 * over any significant packet data.
8108 for (j = 0; j < len / sizeof(u16); j++)
8109 if (((u16 *)&lkups[i].m_u)[j])
8110 ((u16 *)(pkt + offset))[j] =
8111 (((u16 *)(pkt + offset))[j] &
8112 ~((u16 *)&lkups[i].m_u)[j]) |
8113 (((u16 *)&lkups[i].h_u)[j] &
8114 ((u16 *)&lkups[i].m_u)[j]);
8117 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8123 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8124 * @hw: pointer to the hardware structure
8125 * @tun_type: tunnel type
8126 * @pkt: dummy packet to fill in
8127 * @offsets: offset info for the dummy packet
8129 static enum ice_status
8130 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8131 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8136 case ICE_SW_TUN_AND_NON_TUN:
8137 case ICE_SW_TUN_VXLAN_GPE:
8138 case ICE_SW_TUN_VXLAN:
8139 case ICE_SW_TUN_VXLAN_VLAN:
8140 case ICE_SW_TUN_UDP:
8141 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8145 case ICE_SW_TUN_GENEVE:
8146 case ICE_SW_TUN_GENEVE_VLAN:
8147 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8152 /* Nothing needs to be done for this tunnel type */
8156 /* Find the outer UDP protocol header and insert the port number */
8157 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8158 if (offsets[i].type == ICE_UDP_OF) {
8159 struct ice_l4_hdr *hdr;
8162 offset = offsets[i].offset;
8163 hdr = (struct ice_l4_hdr *)&pkt[offset];
8164 hdr->dst_port = CPU_TO_BE16(open_port);
8174 * ice_find_adv_rule_entry - Search a rule entry
8175 * @hw: pointer to the hardware structure
8176 * @lkups: lookup elements or match criteria for the advanced recipe, one
8177 * structure per protocol header
8178 * @lkups_cnt: number of protocols
8179 * @recp_id: recipe ID for which we are finding the rule
8180 * @rinfo: other information regarding the rule e.g. priority and action info
8182 * Helper function to search for a given advance rule entry
8183 * Returns pointer to entry storing the rule if found
8185 static struct ice_adv_fltr_mgmt_list_entry *
8186 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8187 u16 lkups_cnt, u16 recp_id,
8188 struct ice_adv_rule_info *rinfo)
8190 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8191 struct ice_switch_info *sw = hw->switch_info;
8194 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8195 ice_adv_fltr_mgmt_list_entry, list_entry) {
8196 bool lkups_matched = true;
8198 if (lkups_cnt != list_itr->lkups_cnt)
8200 for (i = 0; i < list_itr->lkups_cnt; i++)
8201 if (memcmp(&list_itr->lkups[i], &lkups[i],
8203 lkups_matched = false;
8206 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8207 rinfo->tun_type == list_itr->rule_info.tun_type &&
8215 * ice_adv_add_update_vsi_list
8216 * @hw: pointer to the hardware structure
8217 * @m_entry: pointer to current adv filter management list entry
8218 * @cur_fltr: filter information from the book keeping entry
8219 * @new_fltr: filter information with the new VSI to be added
8221 * Call AQ command to add or update previously created VSI list with new VSI.
8223 * Helper function to do book keeping associated with adding filter information
8224 * The algorithm to do the booking keeping is described below :
8225 * When a VSI needs to subscribe to a given advanced filter
8226 * if only one VSI has been added till now
8227 * Allocate a new VSI list and add two VSIs
8228 * to this list using switch rule command
8229 * Update the previously created switch rule with the
8230 * newly created VSI list ID
8231 * if a VSI list was previously created
8232 * Add the new VSI to the previously created VSI list set
8233 * using the update switch rule command
8235 static enum ice_status
8236 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8237 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8238 struct ice_adv_rule_info *cur_fltr,
8239 struct ice_adv_rule_info *new_fltr)
8241 enum ice_status status;
8242 u16 vsi_list_id = 0;
8244 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8245 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8246 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8247 return ICE_ERR_NOT_IMPL;
8249 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8250 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8251 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8252 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8253 return ICE_ERR_NOT_IMPL;
8255 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8256 /* Only one entry existed in the mapping and it was not already
8257 * a part of a VSI list. So, create a VSI list with the old and
8260 struct ice_fltr_info tmp_fltr;
8261 u16 vsi_handle_arr[2];
8263 /* A rule already exists with the new VSI being added */
8264 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8265 new_fltr->sw_act.fwd_id.hw_vsi_id)
8266 return ICE_ERR_ALREADY_EXISTS;
8268 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8269 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8270 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8276 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8277 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8278 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8279 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8280 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8281 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8283 /* Update the previous switch rule of "forward to VSI" to
8286 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8290 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8291 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8292 m_entry->vsi_list_info =
8293 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8296 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8298 if (!m_entry->vsi_list_info)
8301 /* A rule already exists with the new VSI being added */
8302 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8305 /* Update the previously created VSI list set with
8306 * the new VSI ID passed in
8308 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8310 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8312 ice_aqc_opc_update_sw_rules,
8314 /* update VSI list mapping info with new VSI ID */
8316 ice_set_bit(vsi_handle,
8317 m_entry->vsi_list_info->vsi_map);
8320 m_entry->vsi_count++;
8325 * ice_add_adv_rule - helper function to create an advanced switch rule
8326 * @hw: pointer to the hardware structure
8327 * @lkups: information on the words that needs to be looked up. All words
8328 * together makes one recipe
8329 * @lkups_cnt: num of entries in the lkups array
8330 * @rinfo: other information related to the rule that needs to be programmed
8331 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8332 * ignored is case of error.
8334 * This function can program only 1 rule at a time. The lkups is used to
8335 * describe the all the words that forms the "lookup" portion of the recipe.
8336 * These words can span multiple protocols. Callers to this function need to
8337 * pass in a list of protocol headers with lookup information along and mask
8338 * that determines which words are valid from the given protocol header.
8339 * rinfo describes other information related to this rule such as forwarding
8340 * IDs, priority of this rule, etc.
8343 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8344 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8345 struct ice_rule_query_data *added_entry)
8347 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8348 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8349 const struct ice_dummy_pkt_offsets *pkt_offsets;
8350 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8351 struct LIST_HEAD_TYPE *rule_head;
8352 struct ice_switch_info *sw;
8353 enum ice_status status;
8354 const u8 *pkt = NULL;
8360 /* Initialize profile to result index bitmap */
8361 if (!hw->switch_info->prof_res_bm_init) {
8362 hw->switch_info->prof_res_bm_init = 1;
8363 ice_init_prof_result_bm(hw);
8366 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8367 if (!prof_rule && !lkups_cnt)
8368 return ICE_ERR_PARAM;
8370 /* get # of words we need to match */
8372 for (i = 0; i < lkups_cnt; i++) {
8375 ptr = (u16 *)&lkups[i].m_u;
8376 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8382 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8383 return ICE_ERR_PARAM;
8385 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8386 return ICE_ERR_PARAM;
8389 /* make sure that we can locate a dummy packet */
8390 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8393 status = ICE_ERR_PARAM;
8394 goto err_ice_add_adv_rule;
8397 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8398 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8399 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8400 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8403 vsi_handle = rinfo->sw_act.vsi_handle;
8404 if (!ice_is_vsi_valid(hw, vsi_handle))
8405 return ICE_ERR_PARAM;
8407 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8408 rinfo->sw_act.fwd_id.hw_vsi_id =
8409 ice_get_hw_vsi_num(hw, vsi_handle);
8410 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8411 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8413 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8416 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8418 /* we have to add VSI to VSI_LIST and increment vsi_count.
8419 * Also Update VSI list so that we can change forwarding rule
8420 * if the rule already exists, we will check if it exists with
8421 * same vsi_id, if not then add it to the VSI list if it already
8422 * exists if not then create a VSI list and add the existing VSI
8423 * ID and the new VSI ID to the list
8424 * We will add that VSI to the list
8426 status = ice_adv_add_update_vsi_list(hw, m_entry,
8427 &m_entry->rule_info,
8430 added_entry->rid = rid;
8431 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8432 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8436 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8437 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8439 return ICE_ERR_NO_MEMORY;
8440 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8441 switch (rinfo->sw_act.fltr_act) {
8442 case ICE_FWD_TO_VSI:
8443 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8444 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8445 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8448 act |= ICE_SINGLE_ACT_TO_Q;
8449 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8450 ICE_SINGLE_ACT_Q_INDEX_M;
8452 case ICE_FWD_TO_QGRP:
8453 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8454 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8455 act |= ICE_SINGLE_ACT_TO_Q;
8456 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8457 ICE_SINGLE_ACT_Q_INDEX_M;
8458 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8459 ICE_SINGLE_ACT_Q_REGION_M;
8461 case ICE_DROP_PACKET:
8462 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8463 ICE_SINGLE_ACT_VALID_BIT;
8466 status = ICE_ERR_CFG;
8467 goto err_ice_add_adv_rule;
8470 /* set the rule LOOKUP type based on caller specified 'RX'
8471 * instead of hardcoding it to be either LOOKUP_TX/RX
8473 * for 'RX' set the source to be the port number
8474 * for 'TX' set the source to be the source HW VSI number (determined
8478 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8479 s_rule->pdata.lkup_tx_rx.src =
8480 CPU_TO_LE16(hw->port_info->lport);
8482 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8483 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8486 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8487 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8489 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8490 pkt_len, pkt_offsets);
8492 goto err_ice_add_adv_rule;
8494 if (rinfo->tun_type != ICE_NON_TUN &&
8495 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8496 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8497 s_rule->pdata.lkup_tx_rx.hdr,
8500 goto err_ice_add_adv_rule;
8503 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8504 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8507 goto err_ice_add_adv_rule;
8508 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8509 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8511 status = ICE_ERR_NO_MEMORY;
8512 goto err_ice_add_adv_rule;
8515 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8516 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8517 ICE_NONDMA_TO_NONDMA);
8518 if (!adv_fltr->lkups && !prof_rule) {
8519 status = ICE_ERR_NO_MEMORY;
8520 goto err_ice_add_adv_rule;
8523 adv_fltr->lkups_cnt = lkups_cnt;
8524 adv_fltr->rule_info = *rinfo;
8525 adv_fltr->rule_info.fltr_rule_id =
8526 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8527 sw = hw->switch_info;
8528 sw->recp_list[rid].adv_rule = true;
8529 rule_head = &sw->recp_list[rid].filt_rules;
8531 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8532 adv_fltr->vsi_count = 1;
8534 /* Add rule entry to book keeping list */
8535 LIST_ADD(&adv_fltr->list_entry, rule_head);
8537 added_entry->rid = rid;
8538 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8539 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8541 err_ice_add_adv_rule:
8542 if (status && adv_fltr) {
8543 ice_free(hw, adv_fltr->lkups);
8544 ice_free(hw, adv_fltr);
8547 ice_free(hw, s_rule);
8553 * ice_adv_rem_update_vsi_list
8554 * @hw: pointer to the hardware structure
8555 * @vsi_handle: VSI handle of the VSI to remove
8556 * @fm_list: filter management entry for which the VSI list management needs to
8559 static enum ice_status
8560 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8561 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8563 struct ice_vsi_list_map_info *vsi_list_info;
8564 enum ice_sw_lkup_type lkup_type;
8565 enum ice_status status;
8568 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8569 fm_list->vsi_count == 0)
8570 return ICE_ERR_PARAM;
8572 /* A rule with the VSI being removed does not exist */
8573 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8574 return ICE_ERR_DOES_NOT_EXIST;
8576 lkup_type = ICE_SW_LKUP_LAST;
8577 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8578 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8579 ice_aqc_opc_update_sw_rules,
8584 fm_list->vsi_count--;
8585 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8586 vsi_list_info = fm_list->vsi_list_info;
8587 if (fm_list->vsi_count == 1) {
8588 struct ice_fltr_info tmp_fltr;
8591 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8593 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8594 return ICE_ERR_OUT_OF_RANGE;
8596 /* Make sure VSI list is empty before removing it below */
8597 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8599 ice_aqc_opc_update_sw_rules,
8604 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8605 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8606 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8607 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8608 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8609 tmp_fltr.fwd_id.hw_vsi_id =
8610 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8611 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8612 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8613 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8615 /* Update the previous switch rule of "MAC forward to VSI" to
8616 * "MAC fwd to VSI list"
8618 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8620 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8621 tmp_fltr.fwd_id.hw_vsi_id, status);
8624 fm_list->vsi_list_info->ref_cnt--;
8626 /* Remove the VSI list since it is no longer used */
8627 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8629 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8630 vsi_list_id, status);
8634 LIST_DEL(&vsi_list_info->list_entry);
8635 ice_free(hw, vsi_list_info);
8636 fm_list->vsi_list_info = NULL;
8643 * ice_rem_adv_rule - removes existing advanced switch rule
8644 * @hw: pointer to the hardware structure
8645 * @lkups: information on the words that needs to be looked up. All words
8646 * together makes one recipe
8647 * @lkups_cnt: num of entries in the lkups array
8648 * @rinfo: Its the pointer to the rule information for the rule
8650 * This function can be used to remove 1 rule at a time. The lkups is
8651 * used to describe all the words that forms the "lookup" portion of the
8652 * rule. These words can span multiple protocols. Callers to this function
8653 * need to pass in a list of protocol headers with lookup information along
8654 * and mask that determines which words are valid from the given protocol
8655 * header. rinfo describes other information related to this rule such as
8656 * forwarding IDs, priority of this rule, etc.
8659 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8660 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8662 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8663 struct ice_prot_lkup_ext lkup_exts;
8664 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8665 enum ice_status status = ICE_SUCCESS;
8666 bool remove_rule = false;
8667 u16 i, rid, vsi_handle;
8669 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8670 for (i = 0; i < lkups_cnt; i++) {
8673 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8676 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8681 /* Create any special protocol/offset pairs, such as looking at tunnel
8682 * bits by extracting metadata
8684 status = ice_add_special_words(rinfo, &lkup_exts);
8688 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8689 /* If did not find a recipe that match the existing criteria */
8690 if (rid == ICE_MAX_NUM_RECIPES)
8691 return ICE_ERR_PARAM;
8693 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8694 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8695 /* the rule is already removed */
8698 ice_acquire_lock(rule_lock);
8699 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8701 } else if (list_elem->vsi_count > 1) {
8702 remove_rule = false;
8703 vsi_handle = rinfo->sw_act.vsi_handle;
8704 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8706 vsi_handle = rinfo->sw_act.vsi_handle;
8707 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8709 ice_release_lock(rule_lock);
8712 if (list_elem->vsi_count == 0)
8715 ice_release_lock(rule_lock);
8717 struct ice_aqc_sw_rules_elem *s_rule;
8720 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8721 s_rule = (struct ice_aqc_sw_rules_elem *)
8722 ice_malloc(hw, rule_buf_sz);
8724 return ICE_ERR_NO_MEMORY;
8725 s_rule->pdata.lkup_tx_rx.act = 0;
8726 s_rule->pdata.lkup_tx_rx.index =
8727 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8728 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8729 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8731 ice_aqc_opc_remove_sw_rules, NULL);
8732 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8733 struct ice_switch_info *sw = hw->switch_info;
8735 ice_acquire_lock(rule_lock);
8736 LIST_DEL(&list_elem->list_entry);
8737 ice_free(hw, list_elem->lkups);
8738 ice_free(hw, list_elem);
8739 ice_release_lock(rule_lock);
8740 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8741 sw->recp_list[rid].adv_rule = false;
8743 ice_free(hw, s_rule);
8749 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8750 * @hw: pointer to the hardware structure
8751 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8753 * This function is used to remove 1 rule at a time. The removal is based on
8754 * the remove_entry parameter. This function will remove rule for a given
8755 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8758 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8759 struct ice_rule_query_data *remove_entry)
8761 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8762 struct LIST_HEAD_TYPE *list_head;
8763 struct ice_adv_rule_info rinfo;
8764 struct ice_switch_info *sw;
8766 sw = hw->switch_info;
8767 if (!sw->recp_list[remove_entry->rid].recp_created)
8768 return ICE_ERR_PARAM;
8769 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8770 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8772 if (list_itr->rule_info.fltr_rule_id ==
8773 remove_entry->rule_id) {
8774 rinfo = list_itr->rule_info;
8775 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8776 return ice_rem_adv_rule(hw, list_itr->lkups,
8777 list_itr->lkups_cnt, &rinfo);
8780 /* either list is empty or unable to find rule */
8781 return ICE_ERR_DOES_NOT_EXIST;
8785 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8787 * @hw: pointer to the hardware structure
8788 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8790 * This function is used to remove all the rules for a given VSI and as soon
8791 * as removing a rule fails, it will return immediately with the error code,
8792 * else it will return ICE_SUCCESS
8794 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8796 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8797 struct ice_vsi_list_map_info *map_info;
8798 struct LIST_HEAD_TYPE *list_head;
8799 struct ice_adv_rule_info rinfo;
8800 struct ice_switch_info *sw;
8801 enum ice_status status;
8804 sw = hw->switch_info;
8805 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8806 if (!sw->recp_list[rid].recp_created)
8808 if (!sw->recp_list[rid].adv_rule)
8811 list_head = &sw->recp_list[rid].filt_rules;
8812 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8813 ice_adv_fltr_mgmt_list_entry,
8815 rinfo = list_itr->rule_info;
8817 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8818 map_info = list_itr->vsi_list_info;
8822 if (!ice_is_bit_set(map_info->vsi_map,
8825 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8829 rinfo.sw_act.vsi_handle = vsi_handle;
8830 status = ice_rem_adv_rule(hw, list_itr->lkups,
8831 list_itr->lkups_cnt, &rinfo);
8841 * ice_replay_fltr - Replay all the filters stored by a specific list head
8842 * @hw: pointer to the hardware structure
8843 * @list_head: list for which filters needs to be replayed
8844 * @recp_id: Recipe ID for which rules need to be replayed
8846 static enum ice_status
8847 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8849 struct ice_fltr_mgmt_list_entry *itr;
8850 enum ice_status status = ICE_SUCCESS;
8851 struct ice_sw_recipe *recp_list;
8852 u8 lport = hw->port_info->lport;
8853 struct LIST_HEAD_TYPE l_head;
8855 if (LIST_EMPTY(list_head))
8858 recp_list = &hw->switch_info->recp_list[recp_id];
8859 /* Move entries from the given list_head to a temporary l_head so that
8860 * they can be replayed. Otherwise when trying to re-add the same
8861 * filter, the function will return already exists
8863 LIST_REPLACE_INIT(list_head, &l_head);
8865 /* Mark the given list_head empty by reinitializing it so filters
8866 * could be added again by *handler
8868 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8870 struct ice_fltr_list_entry f_entry;
8873 f_entry.fltr_info = itr->fltr_info;
8874 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8875 status = ice_add_rule_internal(hw, recp_list, lport,
8877 if (status != ICE_SUCCESS)
8882 /* Add a filter per VSI separately */
8883 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8885 if (!ice_is_vsi_valid(hw, vsi_handle))
8888 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8889 f_entry.fltr_info.vsi_handle = vsi_handle;
8890 f_entry.fltr_info.fwd_id.hw_vsi_id =
8891 ice_get_hw_vsi_num(hw, vsi_handle);
8892 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8893 if (recp_id == ICE_SW_LKUP_VLAN)
8894 status = ice_add_vlan_internal(hw, recp_list,
8897 status = ice_add_rule_internal(hw, recp_list,
8900 if (status != ICE_SUCCESS)
8905 /* Clear the filter management list */
8906 ice_rem_sw_rule_info(hw, &l_head);
8911 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8912 * @hw: pointer to the hardware structure
8914 * NOTE: This function does not clean up partially added filters on error.
8915 * It is up to caller of the function to issue a reset or fail early.
8917 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8919 struct ice_switch_info *sw = hw->switch_info;
8920 enum ice_status status = ICE_SUCCESS;
8923 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8924 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8926 status = ice_replay_fltr(hw, i, head);
8927 if (status != ICE_SUCCESS)
8934 * ice_replay_vsi_fltr - Replay filters for requested VSI
8935 * @hw: pointer to the hardware structure
8936 * @pi: pointer to port information structure
8937 * @sw: pointer to switch info struct for which function replays filters
8938 * @vsi_handle: driver VSI handle
8939 * @recp_id: Recipe ID for which rules need to be replayed
8940 * @list_head: list for which filters need to be replayed
8942 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8943 * It is required to pass valid VSI handle.
8945 static enum ice_status
8946 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8947 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8948 struct LIST_HEAD_TYPE *list_head)
8950 struct ice_fltr_mgmt_list_entry *itr;
8951 enum ice_status status = ICE_SUCCESS;
8952 struct ice_sw_recipe *recp_list;
8955 if (LIST_EMPTY(list_head))
8957 recp_list = &sw->recp_list[recp_id];
8958 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8960 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8962 struct ice_fltr_list_entry f_entry;
8964 f_entry.fltr_info = itr->fltr_info;
8965 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8966 itr->fltr_info.vsi_handle == vsi_handle) {
8967 /* update the src in case it is VSI num */
8968 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8969 f_entry.fltr_info.src = hw_vsi_id;
8970 status = ice_add_rule_internal(hw, recp_list,
8973 if (status != ICE_SUCCESS)
8977 if (!itr->vsi_list_info ||
8978 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8980 /* Clearing it so that the logic can add it back */
8981 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8982 f_entry.fltr_info.vsi_handle = vsi_handle;
8983 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8984 /* update the src in case it is VSI num */
8985 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8986 f_entry.fltr_info.src = hw_vsi_id;
8987 if (recp_id == ICE_SW_LKUP_VLAN)
8988 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8990 status = ice_add_rule_internal(hw, recp_list,
8993 if (status != ICE_SUCCESS)
9001 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9002 * @hw: pointer to the hardware structure
9003 * @vsi_handle: driver VSI handle
9004 * @list_head: list for which filters need to be replayed
9006 * Replay the advanced rule for the given VSI.
9008 static enum ice_status
9009 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9010 struct LIST_HEAD_TYPE *list_head)
9012 struct ice_rule_query_data added_entry = { 0 };
9013 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9014 enum ice_status status = ICE_SUCCESS;
9016 if (LIST_EMPTY(list_head))
9018 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9020 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9021 u16 lk_cnt = adv_fltr->lkups_cnt;
9023 if (vsi_handle != rinfo->sw_act.vsi_handle)
9025 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9034 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9035 * @hw: pointer to the hardware structure
9036 * @pi: pointer to port information structure
9037 * @vsi_handle: driver VSI handle
9039 * Replays filters for requested VSI via vsi_handle.
9042 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9045 struct ice_switch_info *sw = hw->switch_info;
9046 enum ice_status status;
9049 /* Update the recipes that were created */
9050 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9051 struct LIST_HEAD_TYPE *head;
9053 head = &sw->recp_list[i].filt_replay_rules;
9054 if (!sw->recp_list[i].adv_rule)
9055 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9058 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9059 if (status != ICE_SUCCESS)
9067 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
9068 * @hw: pointer to the HW struct
9069 * @sw: pointer to switch info struct for which function removes filters
9071 * Deletes the filter replay rules for given switch
9073 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9080 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9081 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9082 struct LIST_HEAD_TYPE *l_head;
9084 l_head = &sw->recp_list[i].filt_replay_rules;
9085 if (!sw->recp_list[i].adv_rule)
9086 ice_rem_sw_rule_info(hw, l_head);
9088 ice_rem_adv_rule_info(hw, l_head);
9094 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9095 * @hw: pointer to the HW struct
9097 * Deletes the filter replay rules.
9099 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9101 ice_rm_sw_replay_rule_info(hw, hw->switch_info);