1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter the first two bytes define the ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and bytes 12 and 13 are used to program a given Ether type instead
/* First bytes of the dummy L2 header described in the comment above;
 * 0x2 marks a locally administered DA MAC. NOTE(review): the rest of the
 * initializer is not visible in this chunk.
 */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Pairs a protocol header type with its byte offset inside a dummy packet
 * template; each offset list below is terminated by an ICE_PROTOCOL_LAST
 * entry.
 */
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Offset map and dummy packet template for MAC + IPv4 + NVGRE with inner
 * MAC/IPv4/TCP (outer IPv4 protocol 0x2F = GRE, inner protocol 0x06 = TCP).
 */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* Offset map and dummy packet template for MAC + IPv4 + NVGRE with inner
 * MAC/IPv4/UDP (outer IPv4 protocol 0x2F = GRE, inner protocol 0x11 = UDP).
 */
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
/* Offset map and dummy packet template for a UDP tunnel (VXLAN/VXLAN-GPE,
 * dst port 0x12b5 = 4789) carrying inner MAC/IPv4/TCP.
 */
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
/* Offset map and dummy packet template for a UDP tunnel (VXLAN/VXLAN-GPE,
 * dst port 0x12b5 = 4789) carrying inner MAC/IPv4/UDP.
 */
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
/* Non-tunneled, untagged IPv4/UDP template. IP total length 0x1c = 20 B
 * IPv4 header + 8 B UDP header; UDP length field is 8.
 */
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Single-VLAN-tagged IPv4/UDP template: TPID 0x8100 at offset 12,
 * TCI placeholder then inner ether type 0x0800 at the VLAN offset.
 */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Non-tunneled, untagged IPv4/TCP template. IP total length 0x28 = 20 B
 * IPv4 header + 20 B TCP header (data offset nibble 0x5).
 */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Single-VLAN-tagged IPv4/TCP template (TPID 0x8100, inner ether type
 * 0x0800 carried inside the VLAN header bytes).
 */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Untagged IPv6/TCP template (ether type 0x86DD, IPv6 payload length
 * 0x14 = 20 B TCP header, next header 0x06 = TCP).
 */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Single-VLAN-tagged IPv6/TCP template: TPID 0x8100 at 12, inner ether
 * type 0x86DD inside the VLAN bytes, TCP header at offset 58.
 */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Untagged IPv6/UDP template. IPv6 payload length 0x10 = 8 B UDP header
 * plus 8 trailing bytes kept for ESP-in-UDP matching (see comment below).
 */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Single-VLAN-tagged IPv6/UDP template: TPID 0x8100 at 12, inner ether
 * type 0x86DD inside the VLAN bytes, UDP header at offset 58.
 */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv4 / inner IPv4 / TCP. GTP-U flags 0x34 and the
 * PDU session container extension (next-ext type 0x85) follow the 8-byte
 * mandatory GTP-U header.
 */
507 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
508 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
510 { ICE_IPV4_OFOS, 14 },
515 { ICE_PROTOCOL_LAST, 0 },
518 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
519 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
524 0x45, 0x00, 0x00, 0x58, /* IP 14 */
525 0x00, 0x00, 0x00, 0x00,
526 0x00, 0x11, 0x00, 0x00,
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00,
530 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
531 0x00, 0x44, 0x00, 0x00,
533 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
534 0x00, 0x00, 0x00, 0x00,
535 0x00, 0x00, 0x00, 0x85,
537 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
538 0x00, 0x00, 0x00, 0x00,
540 0x45, 0x00, 0x00, 0x28, /* IP 62 */
541 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x06, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
546 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x00,
549 0x50, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv4 / inner IPv4 / UDP (outer UDP dst port
 * 0x0868 = 2152, the GTP-U port).
 */
555 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
556 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
558 { ICE_IPV4_OFOS, 14 },
562 { ICE_UDP_ILOS, 82 },
563 { ICE_PROTOCOL_LAST, 0 },
566 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
567 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
568 0x00, 0x00, 0x00, 0x00,
569 0x00, 0x00, 0x00, 0x00,
572 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x11, 0x00, 0x00,
575 0x00, 0x00, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
579 0x00, 0x38, 0x00, 0x00,
581 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x85,
585 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
586 0x00, 0x00, 0x00, 0x00,
588 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x11, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
595 0x00, 0x08, 0x00, 0x00,
597 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Comment corrected: the packet below has an OUTER IPv4 header (0x45 at
 * offset 14, table entry ICE_IPV4_OFOS) and an INNER IPv6 header (0x60 at
 * offset 62), matching the array name.
 */
600 /* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
601 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
603 { ICE_IPV4_OFOS, 14 },
608 { ICE_PROTOCOL_LAST, 0 },
611 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
612 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
617 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x11, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
624 0x00, 0x58, 0x00, 0x00,
626 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
627 0x00, 0x00, 0x00, 0x00,
628 0x00, 0x00, 0x00, 0x85,
630 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
631 0x00, 0x00, 0x00, 0x00,
633 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
634 0x00, 0x14, 0x06, 0x00,
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x50, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv4 / inner IPv6 / UDP. */
653 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
655 { ICE_IPV4_OFOS, 14 },
659 { ICE_UDP_ILOS, 102 },
660 { ICE_PROTOCOL_LAST, 0 },
663 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
664 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
665 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00,
669 0x45, 0x00, 0x00, 0x60, /* IP 14 */
670 0x00, 0x00, 0x00, 0x00,
671 0x00, 0x11, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00,
673 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
676 0x00, 0x4c, 0x00, 0x00,
678 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
679 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x85,
682 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
683 0x00, 0x00, 0x00, 0x00,
685 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
686 0x00, 0x08, 0x11, 0x00,
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
697 0x00, 0x08, 0x00, 0x00,
699 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv6 / inner IPv4 / TCP (outer UDP at 54 because
 * the fixed IPv6 header is 40 bytes).
 */
702 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
704 { ICE_IPV6_OFOS, 14 },
709 { ICE_PROTOCOL_LAST, 0 },
712 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
713 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
718 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
719 0x00, 0x44, 0x11, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
730 0x00, 0x44, 0x00, 0x00,
732 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
733 0x00, 0x00, 0x00, 0x00,
734 0x00, 0x00, 0x00, 0x85,
736 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
737 0x00, 0x00, 0x00, 0x00,
739 0x45, 0x00, 0x00, 0x28, /* IP 82 */
740 0x00, 0x00, 0x00, 0x00,
741 0x00, 0x06, 0x00, 0x00,
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x50, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv6 / inner IPv4 / UDP. */
754 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
756 { ICE_IPV6_OFOS, 14 },
760 { ICE_UDP_ILOS, 102 },
761 { ICE_PROTOCOL_LAST, 0 },
764 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
765 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
770 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
771 0x00, 0x38, 0x11, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
781 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
782 0x00, 0x38, 0x00, 0x00,
784 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
785 0x00, 0x00, 0x00, 0x00,
786 0x00, 0x00, 0x00, 0x85,
788 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
789 0x00, 0x00, 0x00, 0x00,
791 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x11, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
798 0x00, 0x08, 0x00, 0x00,
800 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv6 / inner IPv6 / TCP. */
803 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
805 { ICE_IPV6_OFOS, 14 },
810 { ICE_PROTOCOL_LAST, 0 },
813 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
814 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
819 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
820 0x00, 0x58, 0x11, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
830 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
831 0x00, 0x58, 0x00, 0x00,
833 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
834 0x00, 0x00, 0x00, 0x00,
835 0x00, 0x00, 0x00, 0x85,
837 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
838 0x00, 0x00, 0x00, 0x00,
840 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
841 0x00, 0x14, 0x06, 0x00,
842 0x00, 0x00, 0x00, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x50, 0x00, 0x00, 0x00,
855 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* GTP-U template, outer IPv6 / inner IPv6 / UDP. */
860 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
862 { ICE_IPV6_OFOS, 14 },
866 { ICE_UDP_ILOS, 102 },
867 { ICE_PROTOCOL_LAST, 0 },
870 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
871 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
876 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
877 0x00, 0x4c, 0x11, 0x00,
878 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
888 0x00, 0x4c, 0x00, 0x00,
890 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
891 0x00, 0x00, 0x00, 0x00,
892 0x00, 0x00, 0x00, 0x85,
894 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
895 0x00, 0x00, 0x00, 0x00,
897 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
898 0x00, 0x08, 0x11, 0x00,
899 0x00, 0x00, 0x00, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
908 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
909 0x00, 0x08, 0x00, 0x00,
911 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Minimal GTP-U template with no inner payload headers: outer IPv4/UDP
 * (dst port 2152) + GTP-U header + PDU session extension only.
 */
914 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
916 { ICE_IPV4_OFOS, 14 },
919 { ICE_PROTOCOL_LAST, 0 },
922 static const u8 dummy_udp_gtp_packet[] = {
923 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
928 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
929 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x11, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
935 0x00, 0x1c, 0x00, 0x00,
937 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
938 0x00, 0x00, 0x00, 0x00,
939 0x00, 0x00, 0x00, 0x85,
941 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
942 0x00, 0x00, 0x00, 0x00,
/* GTP-U template, outer IPv4 / inner IPv4, no L4 header (inner IP total
 * length 0x14 = bare 20-byte header).
 * NOTE(review): unlike the tables above, this one is not declared
 * static const — confirm no other translation unit references it before
 * tightening the linkage.
 */
946 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
948 { ICE_IPV4_OFOS, 14 },
952 { ICE_PROTOCOL_LAST, 0 },
955 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
956 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
957 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x00, 0x00,
961 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
962 0x00, 0x00, 0x40, 0x00,
963 0x40, 0x11, 0x00, 0x00,
964 0x00, 0x00, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
967 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
968 0x00, 0x00, 0x00, 0x00,
970 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x85,
974 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
975 0x00, 0x00, 0x00, 0x00,
977 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
978 0x00, 0x00, 0x40, 0x00,
979 0x40, 0x00, 0x00, 0x00,
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
/* GTP-U template, outer IPv4 / inner IPv6, no L4 header (inner next
 * header 0x3b = No Next Header).
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
986 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
988 { ICE_IPV4_OFOS, 14 },
992 { ICE_PROTOCOL_LAST, 0 },
995 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
996 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
997 0x00, 0x00, 0x00, 0x00,
998 0x00, 0x00, 0x00, 0x00,
1001 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
1002 0x00, 0x00, 0x40, 0x00,
1003 0x40, 0x11, 0x00, 0x00,
1004 0x00, 0x00, 0x00, 0x00,
1005 0x00, 0x00, 0x00, 0x00,
1007 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1008 0x00, 0x00, 0x00, 0x00,
1010 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x85,
1014 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1015 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1018 0x00, 0x00, 0x3b, 0x00,
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
/* GTP-U template, outer IPv6 / inner IPv4, no L4 header.
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
1032 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1033 { ICE_MAC_OFOS, 0 },
1034 { ICE_IPV6_OFOS, 14 },
1037 { ICE_IPV4_IL, 82 },
1038 { ICE_PROTOCOL_LAST, 0 },
1041 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1042 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1043 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00,
1047 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1048 0x00, 0x58, 0x11, 0x00, /* Next header UDP */
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00,
1058 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1059 0x00, 0x00, 0x00, 0x00,
1061 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1062 0x00, 0x00, 0x00, 0x00,
1063 0x00, 0x00, 0x00, 0x85,
1065 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1066 0x00, 0x00, 0x00, 0x00,
1068 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1069 0x00, 0x00, 0x40, 0x00,
1070 0x40, 0x00, 0x00, 0x00,
1071 0x00, 0x00, 0x00, 0x00,
1072 0x00, 0x00, 0x00, 0x00,
/* GTP-U template, outer IPv6 / inner IPv6, no L4 header. Inner-header
 * comment corrected to ICE_IPV6_IL to match the offsets table entry.
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
1078 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1079 { ICE_MAC_OFOS, 0 },
1080 { ICE_IPV6_OFOS, 14 },
1083 { ICE_IPV6_IL, 82 },
1084 { ICE_PROTOCOL_LAST, 0 },
1087 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1088 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1093 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1094 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
1095 0x00, 0x00, 0x00, 0x00,
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1104 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1105 0x00, 0x00, 0x00, 0x00,
1107 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x85,
1111 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1112 0x00, 0x00, 0x00, 0x00,
1114 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
1115 0x00, 0x00, 0x3b, 0x00,
1116 0x00, 0x00, 0x00, 0x00,
1117 0x00, 0x00, 0x00, 0x00,
1118 0x00, 0x00, 0x00, 0x00,
1119 0x00, 0x00, 0x00, 0x00,
1120 0x00, 0x00, 0x00, 0x00,
1121 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x00,
1123 0x00, 0x00, 0x00, 0x00,
/* Offset map for a GTP-U rule over IPv4 with no payload matching beyond
 * the GTP header (ICE_GTP_NO_PAY at 42 = 14 MAC + 20 IPv4 + 8 UDP).
 */
1129 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1130 { ICE_MAC_OFOS, 0 },
1131 { ICE_IPV4_OFOS, 14 },
1133 { ICE_GTP_NO_PAY, 42 },
1134 { ICE_PROTOCOL_LAST, 0 },
/* Offset map for a GTP-U rule over IPv6 with no payload matching beyond
 * the GTP header (ICE_GTP_NO_PAY at 62 = 14 MAC + 40 IPv6 + 8 UDP).
 */
1138 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1139 { ICE_MAC_OFOS, 0 },
1140 { ICE_IPV6_OFOS, 14 },
1142 { ICE_GTP_NO_PAY, 62 },
1143 { ICE_PROTOCOL_LAST, 0 },
/* Offset map for a bare PPPoE session packet (no IP payload matched). */
1146 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1147 { ICE_MAC_OFOS, 0 },
1148 { ICE_ETYPE_OL, 12 },
1149 { ICE_VLAN_OFOS, 14},
1151 { ICE_PROTOCOL_LAST, 0 },
/* PPPoE session template carrying IPv4: VLAN TPID 0x8100 at 12, PPPoE
 * ether type 0x8864 inside the VLAN bytes, PPP protocol 0x0021 = IPv4.
 * Inner-IP comment corrected to ICE_IPV4_OFOS to match the offsets table.
 */
1154 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1155 { ICE_MAC_OFOS, 0 },
1156 { ICE_ETYPE_OL, 12 },
1157 { ICE_VLAN_OFOS, 14},
1159 { ICE_IPV4_OFOS, 26 },
1160 { ICE_PROTOCOL_LAST, 0 },
1163 static const u8 dummy_pppoe_ipv4_packet[] = {
1164 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1165 0x00, 0x00, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1168 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1170 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1172 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1175 0x00, 0x21, /* PPP Link Layer 24 */
1177 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1183 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE session template carrying IPv4/TCP (PPP protocol 0x0021 = IPv4).
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
1187 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1188 { ICE_MAC_OFOS, 0 },
1189 { ICE_ETYPE_OL, 12 },
1190 { ICE_VLAN_OFOS, 14},
1192 { ICE_IPV4_OFOS, 26 },
1194 { ICE_PROTOCOL_LAST, 0 },
1197 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1198 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1199 0x00, 0x00, 0x00, 0x00,
1200 0x00, 0x00, 0x00, 0x00,
1202 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1204 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1206 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1209 0x00, 0x21, /* PPP Link Layer 24 */
1211 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1212 0x00, 0x01, 0x00, 0x00,
1213 0x00, 0x06, 0x00, 0x00,
1214 0x00, 0x00, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00,
1217 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1218 0x00, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x00, 0x00,
1220 0x50, 0x00, 0x00, 0x00,
1221 0x00, 0x00, 0x00, 0x00,
1223 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE session template carrying IPv4/UDP (PPP protocol 0x0021 = IPv4).
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
1227 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1228 { ICE_MAC_OFOS, 0 },
1229 { ICE_ETYPE_OL, 12 },
1230 { ICE_VLAN_OFOS, 14},
1232 { ICE_IPV4_OFOS, 26 },
1233 { ICE_UDP_ILOS, 46 },
1234 { ICE_PROTOCOL_LAST, 0 },
1237 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1238 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1239 0x00, 0x00, 0x00, 0x00,
1240 0x00, 0x00, 0x00, 0x00,
1242 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1244 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1246 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1249 0x00, 0x21, /* PPP Link Layer 24 */
1251 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1252 0x00, 0x01, 0x00, 0x00,
1253 0x00, 0x11, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1255 0x00, 0x00, 0x00, 0x00,
1257 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1258 0x00, 0x08, 0x00, 0x00,
1260 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE session template carrying a bare IPv6 header (PPP protocol
 * 0x0057 = IPv6, next header 0x3b = No Next Header).
 */
1263 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1264 { ICE_MAC_OFOS, 0 },
1265 { ICE_ETYPE_OL, 12 },
1266 { ICE_VLAN_OFOS, 14},
1268 { ICE_IPV6_OFOS, 26 },
1269 { ICE_PROTOCOL_LAST, 0 },
1272 static const u8 dummy_pppoe_ipv6_packet[] = {
1273 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1274 0x00, 0x00, 0x00, 0x00,
1275 0x00, 0x00, 0x00, 0x00,
1277 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1279 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1281 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1284 0x00, 0x57, /* PPP Link Layer 24 */
1286 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1287 0x00, 0x00, 0x3b, 0x00,
1288 0x00, 0x00, 0x00, 0x00,
1289 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00,
1291 0x00, 0x00, 0x00, 0x00,
1292 0x00, 0x00, 0x00, 0x00,
1293 0x00, 0x00, 0x00, 0x00,
1294 0x00, 0x00, 0x00, 0x00,
1295 0x00, 0x00, 0x00, 0x00,
1297 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* PPPoE session template carrying IPv6/TCP (PPP protocol 0x0057 = IPv6).
 * NOTE(review): offsets table lacks static const — verify external users
 * before changing linkage.
 */
1301 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1302 { ICE_MAC_OFOS, 0 },
1303 { ICE_ETYPE_OL, 12 },
1304 { ICE_VLAN_OFOS, 14},
1306 { ICE_IPV6_OFOS, 26 },
1308 { ICE_PROTOCOL_LAST, 0 },
1311 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1312 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1313 0x00, 0x00, 0x00, 0x00,
1314 0x00, 0x00, 0x00, 0x00,
1316 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1318 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1320 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1323 0x00, 0x57, /* PPP Link Layer 24 */
1325 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1326 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1327 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00,
1329 0x00, 0x00, 0x00, 0x00,
1330 0x00, 0x00, 0x00, 0x00,
1331 0x00, 0x00, 0x00, 0x00,
1332 0x00, 0x00, 0x00, 0x00,
1333 0x00, 0x00, 0x00, 0x00,
1334 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1337 0x00, 0x00, 0x00, 0x00,
1338 0x00, 0x00, 0x00, 0x00,
1339 0x50, 0x00, 0x00, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1346 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1347 { ICE_MAC_OFOS, 0 },
1348 { ICE_ETYPE_OL, 12 },
1349 { ICE_VLAN_OFOS, 14},
1351 { ICE_IPV6_OFOS, 26 },
1352 { ICE_UDP_ILOS, 66 },
1353 { ICE_PROTOCOL_LAST, 0 },
1356 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1357 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1358 0x00, 0x00, 0x00, 0x00,
1359 0x00, 0x00, 0x00, 0x00,
1361 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1363 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1365 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1368 0x00, 0x57, /* PPP Link Layer 24 */
1370 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1371 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1372 0x00, 0x00, 0x00, 0x00,
1373 0x00, 0x00, 0x00, 0x00,
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1381 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1382 0x00, 0x08, 0x00, 0x00,
1384 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1387 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1388 { ICE_MAC_OFOS, 0 },
1389 { ICE_IPV4_OFOS, 14 },
1391 { ICE_PROTOCOL_LAST, 0 },
1394 static const u8 dummy_ipv4_esp_pkt[] = {
1395 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1396 0x00, 0x00, 0x00, 0x00,
1397 0x00, 0x00, 0x00, 0x00,
1400 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1401 0x00, 0x00, 0x40, 0x00,
1402 0x40, 0x32, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00,
1404 0x00, 0x00, 0x00, 0x00,
1406 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1407 0x00, 0x00, 0x00, 0x00,
1408 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1411 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1412 { ICE_MAC_OFOS, 0 },
1413 { ICE_IPV6_OFOS, 14 },
1415 { ICE_PROTOCOL_LAST, 0 },
1418 static const u8 dummy_ipv6_esp_pkt[] = {
1419 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1420 0x00, 0x00, 0x00, 0x00,
1421 0x00, 0x00, 0x00, 0x00,
1424 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1425 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1426 0x00, 0x00, 0x00, 0x00,
1427 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00,
1429 0x00, 0x00, 0x00, 0x00,
1430 0x00, 0x00, 0x00, 0x00,
1431 0x00, 0x00, 0x00, 0x00,
1432 0x00, 0x00, 0x00, 0x00,
1433 0x00, 0x00, 0x00, 0x00,
1435 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1436 0x00, 0x00, 0x00, 0x00,
1437 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1440 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1441 { ICE_MAC_OFOS, 0 },
1442 { ICE_IPV4_OFOS, 14 },
1444 { ICE_PROTOCOL_LAST, 0 },
1447 static const u8 dummy_ipv4_ah_pkt[] = {
1448 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1449 0x00, 0x00, 0x00, 0x00,
1450 0x00, 0x00, 0x00, 0x00,
1453 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1454 0x00, 0x00, 0x40, 0x00,
1455 0x40, 0x33, 0x00, 0x00,
1456 0x00, 0x00, 0x00, 0x00,
1457 0x00, 0x00, 0x00, 0x00,
1459 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1460 0x00, 0x00, 0x00, 0x00,
1461 0x00, 0x00, 0x00, 0x00,
1462 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1465 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1466 { ICE_MAC_OFOS, 0 },
1467 { ICE_IPV6_OFOS, 14 },
1469 { ICE_PROTOCOL_LAST, 0 },
1472 static const u8 dummy_ipv6_ah_pkt[] = {
1473 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1474 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00,
1478 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1479 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1480 0x00, 0x00, 0x00, 0x00,
1481 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, 0x00, 0x00,
1485 0x00, 0x00, 0x00, 0x00,
1486 0x00, 0x00, 0x00, 0x00,
1487 0x00, 0x00, 0x00, 0x00,
1489 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1490 0x00, 0x00, 0x00, 0x00,
1491 0x00, 0x00, 0x00, 0x00,
1492 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1495 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1496 { ICE_MAC_OFOS, 0 },
1497 { ICE_IPV4_OFOS, 14 },
1498 { ICE_UDP_ILOS, 34 },
1500 { ICE_PROTOCOL_LAST, 0 },
1503 static const u8 dummy_ipv4_nat_pkt[] = {
1504 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1505 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00,
1509 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1510 0x00, 0x00, 0x40, 0x00,
1511 0x40, 0x11, 0x00, 0x00,
1512 0x00, 0x00, 0x00, 0x00,
1513 0x00, 0x00, 0x00, 0x00,
1515 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1516 0x00, 0x00, 0x00, 0x00,
1518 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, 0x00, 0x00,
1520 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1523 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1524 { ICE_MAC_OFOS, 0 },
1525 { ICE_IPV6_OFOS, 14 },
1526 { ICE_UDP_ILOS, 54 },
1528 { ICE_PROTOCOL_LAST, 0 },
1531 static const u8 dummy_ipv6_nat_pkt[] = {
1532 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1533 0x00, 0x00, 0x00, 0x00,
1534 0x00, 0x00, 0x00, 0x00,
1537 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1538 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1539 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00,
1541 0x00, 0x00, 0x00, 0x00,
1542 0x00, 0x00, 0x00, 0x00,
1543 0x00, 0x00, 0x00, 0x00,
1544 0x00, 0x00, 0x00, 0x00,
1545 0x00, 0x00, 0x00, 0x00,
1546 0x00, 0x00, 0x00, 0x00,
1548 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1549 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1557 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1558 { ICE_MAC_OFOS, 0 },
1559 { ICE_IPV4_OFOS, 14 },
1561 { ICE_PROTOCOL_LAST, 0 },
1564 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1565 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1566 0x00, 0x00, 0x00, 0x00,
1567 0x00, 0x00, 0x00, 0x00,
1570 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1571 0x00, 0x00, 0x40, 0x00,
1572 0x40, 0x73, 0x00, 0x00,
1573 0x00, 0x00, 0x00, 0x00,
1574 0x00, 0x00, 0x00, 0x00,
1576 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1577 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, 0x00, 0x00,
1579 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1582 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1583 { ICE_MAC_OFOS, 0 },
1584 { ICE_IPV6_OFOS, 14 },
1586 { ICE_PROTOCOL_LAST, 0 },
1589 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1590 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1591 0x00, 0x00, 0x00, 0x00,
1592 0x00, 0x00, 0x00, 0x00,
1595 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1596 0x00, 0x0c, 0x73, 0x40,
1597 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, 0x00, 0x00,
1601 0x00, 0x00, 0x00, 0x00,
1602 0x00, 0x00, 0x00, 0x00,
1603 0x00, 0x00, 0x00, 0x00,
1604 0x00, 0x00, 0x00, 0x00,
1606 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1607 0x00, 0x00, 0x00, 0x00,
1608 0x00, 0x00, 0x00, 0x00,
1609 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1612 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1613 { ICE_MAC_OFOS, 0 },
1614 { ICE_VLAN_EX, 14 },
1615 { ICE_VLAN_OFOS, 18 },
1616 { ICE_IPV4_OFOS, 22 },
1617 { ICE_PROTOCOL_LAST, 0 },
1620 static const u8 dummy_qinq_ipv4_pkt[] = {
1621 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1622 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00,
1626 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1627 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1629 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1630 0x00, 0x01, 0x00, 0x00,
1631 0x00, 0x11, 0x00, 0x00,
1632 0x00, 0x00, 0x00, 0x00,
1633 0x00, 0x00, 0x00, 0x00,
1635 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1636 0x00, 0x08, 0x00, 0x00,
1638 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1641 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1642 { ICE_MAC_OFOS, 0 },
1643 { ICE_VLAN_EX, 14 },
1644 { ICE_VLAN_OFOS, 18 },
1645 { ICE_IPV6_OFOS, 22 },
1646 { ICE_PROTOCOL_LAST, 0 },
1649 static const u8 dummy_qinq_ipv6_pkt[] = {
1650 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1651 0x00, 0x00, 0x00, 0x00,
1652 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1656 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1658 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1659 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1660 0x00, 0x00, 0x00, 0x00,
1661 0x00, 0x00, 0x00, 0x00,
1662 0x00, 0x00, 0x00, 0x00,
1663 0x00, 0x00, 0x00, 0x00,
1664 0x00, 0x00, 0x00, 0x00,
1665 0x00, 0x00, 0x00, 0x00,
1666 0x00, 0x00, 0x00, 0x00,
1667 0x00, 0x00, 0x00, 0x00,
1669 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1670 0x00, 0x10, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1673 0x00, 0x00, 0x00, 0x00,
1675 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1678 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1679 { ICE_MAC_OFOS, 0 },
1680 { ICE_VLAN_EX, 14 },
1681 { ICE_VLAN_OFOS, 18 },
1683 { ICE_PROTOCOL_LAST, 0 },
1687 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1688 { ICE_MAC_OFOS, 0 },
1689 { ICE_VLAN_EX, 14 },
1690 { ICE_VLAN_OFOS, 18 },
1692 { ICE_IPV4_OFOS, 30 },
1693 { ICE_PROTOCOL_LAST, 0 },
1696 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1697 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1698 0x00, 0x00, 0x00, 0x00,
1699 0x00, 0x00, 0x00, 0x00,
1702 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1703 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1705 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1708 0x00, 0x21, /* PPP Link Layer 28 */
1710 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1711 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00,
1713 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00,
1716 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1720 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1721 { ICE_MAC_OFOS, 0 },
1722 { ICE_ETYPE_OL, 12 },
1724 { ICE_VLAN_OFOS, 18 },
1726 { ICE_IPV6_OFOS, 30 },
1727 { ICE_PROTOCOL_LAST, 0 },
1730 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1731 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1732 0x00, 0x00, 0x00, 0x00,
1733 0x00, 0x00, 0x00, 0x00,
1735 0x91, 0x00, /* ICE_ETYPE_OL 12 */
1737 0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1738 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1740 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1743 0x00, 0x57, /* PPP Link Layer 28*/
1745 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1746 0x00, 0x00, 0x3b, 0x00,
1747 0x00, 0x00, 0x00, 0x00,
1748 0x00, 0x00, 0x00, 0x00,
1749 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00,
1751 0x00, 0x00, 0x00, 0x00,
1752 0x00, 0x00, 0x00, 0x00,
1753 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap: indexed by recipe ID,
 * each entry marks the profiles that reference that recipe
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: indexed by profile ID,
 * each entry marks the recipes that profile maps to
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; defined later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1770 * ice_collect_result_idx - copy result index values
1771 * @buf: buffer that contains the result index
1772 * @recp: the recipe struct to copy data into
1774 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1775 struct ice_sw_recipe *recp)
1777 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1778 ice_set_bit(buf->content.result_indx &
1779 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
 * @vlan: NOTE(review): undocumented in original header; below it selects the
 *	_QINQ variants of the resolved tunnel type - confirm against callers
 */
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
	/* Hard-coded profile ID groups used to classify the recipe's
	 * associated profiles into protocol families
	 */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type;
	u16 i, j, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;

	/* Walk every profile associated with this recipe and record which
	 * protocol-family groups those profiles fall into
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))
		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)
		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)
		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)
		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;
		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
		if ((j >= ICE_PROFID_IPV4_ESP &&
		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		     j <= ICE_PROFID_IPV6_GTPU_TEID))

	/* Derive the coarse tunnel type from the combination of groups seen */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		tun_type = ICE_NON_TUN;
		tun_type = ICE_NON_TUN;

	/* Multiple PPPoE profiles: narrow down to IPv4-only or IPv6-only */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
			tun_type = ICE_SW_TUN_PPPOE_IPV6;

	/* GTP: refine by outer/inner IP version of the matched profile */
	if (tun_type == ICE_SW_TUN_GTP) {
		if (ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
		else if (ice_is_bit_set(recipe_to_profile[rid],
					ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;

	/* A single associated profile maps 1:1 onto a specific tunnel type */
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
				case ICE_PROFID_IPV4_PFCP_NODE:
						ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				case ICE_PROFID_IPV6_PFCP_NODE:
						ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				case ICE_PROFID_IPV4_PFCP_SESSION:
						ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				case ICE_PROFID_IPV6_PFCP_SESSION:
						ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
				case ICE_PROFID_IPV4_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
				case ICE_PROFID_IPV6_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;

	/* A VLAN match upgrades the resolved type to its _QINQ variant */
	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
		tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan && tun_type == ICE_NON_TUN)
		tun_type = ICE_NON_TUN_QINQ;
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;

	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;

		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			if (prot == ICE_META_DATA_ID_HW &&
			    off == ICE_TUN_FLAG_MDID_OFF)

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
2153 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2154 * @hw: pointer to hardware structure
2156 * This function is used to populate recipe_to_profile matrix where index to
2157 * this array is the recipe ID and the element is the mapping of which profiles
2158 * is this recipe mapped to.
2160 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2162 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2165 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2168 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2169 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2170 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2172 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2173 ICE_MAX_NUM_RECIPES);
2174 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2175 ice_set_bit(i, recipe_to_profile[j]);
2180 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2181 * @hw: pointer to the HW struct
2182 * @recp_list: pointer to sw recipe list
2184 * Allocate memory for the entire recipe table and initialize the structures/
2185 * entries corresponding to basic recipes.
2188 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2190 struct ice_sw_recipe *recps;
2193 recps = (struct ice_sw_recipe *)
2194 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2196 return ICE_ERR_NO_MEMORY;
2198 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2199 recps[i].root_rid = i;
2200 INIT_LIST_HEAD(&recps[i].filt_rules);
2201 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2202 INIT_LIST_HEAD(&recps[i].rg_list);
2203 ice_init_lock(&recps[i].filt_rule_lock);
2212 * ice_aq_get_sw_cfg - get switch configuration
2213 * @hw: pointer to the hardware structure
2214 * @buf: pointer to the result buffer
2215 * @buf_size: length of the buffer available for response
2216 * @req_desc: pointer to requested descriptor
2217 * @num_elems: pointer to number of elements
2218 * @cd: pointer to command details structure or NULL
2220 * Get switch configuration (0x0200) to be placed in buf.
2221 * This admin command returns information such as initial VSI/port number
2222 * and switch ID it belongs to.
2224 * NOTE: *req_desc is both an input/output parameter.
2225 * The caller of this function first calls this function with *request_desc set
2226 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2227 * configuration information has been returned; if non-zero (meaning not all
2228 * the information was returned), the caller should call this function again
2229 * with *req_desc set to the previous value returned by f/w to get the
2230 * next block of switch configuration information.
2232 * *num_elems is output only parameter. This reflects the number of elements
2233 * in response buffer. The caller of this function to use *num_elems while
2234 * parsing the response buffer.
2236 static enum ice_status
2237 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2238 u16 buf_size, u16 *req_desc, u16 *num_elems,
2239 struct ice_sq_cd *cd)
2241 struct ice_aqc_get_sw_cfg *cmd;
2242 struct ice_aq_desc desc;
2243 enum ice_status status;
2245 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2246 cmd = &desc.params.get_sw_conf;
2247 cmd->element = CPU_TO_LE16(*req_desc);
2249 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2251 *req_desc = LE16_TO_CPU(cmd->element);
2252 *num_elems = LE16_TO_CPU(cmd->num_elems);
2259 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2260 * @hw: pointer to the HW struct
2261 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2262 * @global_lut_id: output parameter for the RSS global LUT's ID
2264 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2266 struct ice_aqc_alloc_free_res_elem *sw_buf;
2267 enum ice_status status;
2270 buf_len = ice_struct_size(sw_buf, elem, 1);
2271 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2273 return ICE_ERR_NO_MEMORY;
2275 sw_buf->num_elems = CPU_TO_LE16(1);
2276 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2277 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2278 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2280 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2282 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2283 shared_res ? "shared" : "dedicated", status);
2284 goto ice_alloc_global_lut_exit;
2287 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2289 ice_alloc_global_lut_exit:
2290 ice_free(hw, sw_buf);
2295 * ice_free_global_lut - free a RSS global LUT
2296 * @hw: pointer to the HW struct
2297 * @global_lut_id: ID of the RSS global LUT to free
2299 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2301 struct ice_aqc_alloc_free_res_elem *sw_buf;
2302 u16 buf_len, num_elems = 1;
2303 enum ice_status status;
2305 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2306 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2308 return ICE_ERR_NO_MEMORY;
2310 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2311 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2312 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2314 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2316 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2317 global_lut_id, status);
2319 ice_free(hw, sw_buf);
2324 * ice_alloc_sw - allocate resources specific to switch
2325 * @hw: pointer to the HW struct
2326 * @ena_stats: true to turn on VEB stats
2327 * @shared_res: true for shared resource, false for dedicated resource
2328 * @sw_id: switch ID returned
2329 * @counter_id: VEB counter ID returned
2331 * allocates switch resources (SWID and VEB counter) (0x0208)
2334 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2337 struct ice_aqc_alloc_free_res_elem *sw_buf;
2338 struct ice_aqc_res_elem *sw_ele;
2339 enum ice_status status;
2342 buf_len = ice_struct_size(sw_buf, elem, 1);
2343 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2345 return ICE_ERR_NO_MEMORY;
2347 /* Prepare buffer for switch ID.
2348 * The number of resource entries in buffer is passed as 1 since only a
2349 * single switch/VEB instance is allocated, and hence a single sw_id
2352 sw_buf->num_elems = CPU_TO_LE16(1);
2354 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2355 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2356 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2358 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2359 ice_aqc_opc_alloc_res, NULL);
2362 goto ice_alloc_sw_exit;
2364 sw_ele = &sw_buf->elem[0];
2365 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2368 /* Prepare buffer for VEB Counter */
2369 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2370 struct ice_aqc_alloc_free_res_elem *counter_buf;
2371 struct ice_aqc_res_elem *counter_ele;
2373 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2374 ice_malloc(hw, buf_len);
2376 status = ICE_ERR_NO_MEMORY;
2377 goto ice_alloc_sw_exit;
2380 /* The number of resource entries in buffer is passed as 1 since
2381 * only a single switch/VEB instance is allocated, and hence a
2382 * single VEB counter is requested.
2384 counter_buf->num_elems = CPU_TO_LE16(1);
2385 counter_buf->res_type =
2386 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2387 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2388 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2392 ice_free(hw, counter_buf);
2393 goto ice_alloc_sw_exit;
2395 counter_ele = &counter_buf->elem[0];
2396 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2397 ice_free(hw, counter_buf);
2401 ice_free(hw, sw_buf);
2406 * ice_free_sw - free resources specific to switch
2407 * @hw: pointer to the HW struct
2408 * @sw_id: switch ID returned
2409 * @counter_id: VEB counter ID returned
2411 * free switch resources (SWID and VEB counter) (0x0209)
2413 * NOTE: This function frees multiple resources. It continues
2414 * releasing other resources even after it encounters error.
2415 * The error code returned is the last error it encountered.
2417 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2419 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2420 enum ice_status status, ret_status;
2423 buf_len = ice_struct_size(sw_buf, elem, 1);
2424 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2426 return ICE_ERR_NO_MEMORY;
2428 /* Prepare buffer to free for switch ID res.
2429 * The number of resource entries in buffer is passed as 1 since only a
2430 * single switch/VEB instance is freed, and hence a single sw_id
2433 sw_buf->num_elems = CPU_TO_LE16(1);
2434 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2435 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Free the SWID first; ret_status is kept even on failure so the VEB
 * counter below is still released (best-effort cleanup, see header note).
 */
2437 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2438 ice_aqc_opc_free_res, NULL);
2441 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2443 /* Prepare buffer to free for VEB Counter resource */
2444 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2445 ice_malloc(hw, buf_len);
2447 ice_free(hw, sw_buf);
2448 return ICE_ERR_NO_MEMORY;
2451 /* The number of resource entries in buffer is passed as 1 since only a
2452 * single switch/VEB instance is freed, and hence a single VEB counter
2455 counter_buf->num_elems = CPU_TO_LE16(1);
2456 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2457 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
/* Second free: a failure here overwrites ret_status so the caller sees
 * the last error encountered, matching the function's documented contract.
 */
2459 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2460 ice_aqc_opc_free_res, NULL);
2462 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2463 ret_status = status;
2466 ice_free(hw, counter_buf);
2467 ice_free(hw, sw_buf);
2473 * @hw: pointer to the HW struct
2474 * @vsi_ctx: pointer to a VSI context struct
2475 * @cd: pointer to command details structure or NULL
2477 * Add a VSI context to the hardware (0x0210)
2480 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2481 struct ice_sq_cd *cd)
2483 struct ice_aqc_add_update_free_vsi_resp *res;
2484 struct ice_aqc_add_get_update_free_vsi *cmd;
2485 struct ice_aq_desc desc;
2486 enum ice_status status;
2488 cmd = &desc.params.vsi_cmd;
2489 res = &desc.params.add_update_free_vsi_res;
2491 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If not allocating from the shared FW pool, request the specific VSI
 * number carried in vsi_ctx (marked valid with ICE_AQ_VSI_IS_VALID).
 */
2493 if (!vsi_ctx->alloc_from_pool)
2494 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2495 ICE_AQ_VSI_IS_VALID);
2497 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* Command sends vsi_ctx->info to FW, so the RD (buffer-read) flag is set */
2499 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2501 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2502 sizeof(vsi_ctx->info), cd);
/* On success, copy the FW-assigned VSI number and pool accounting back */
2505 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2506 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2507 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2515 * @hw: pointer to the HW struct
2516 * @vsi_ctx: pointer to a VSI context struct
2517 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2518 * @cd: pointer to command details structure or NULL
2520 * Free VSI context info from hardware (0x0213)
2523 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2524 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2526 struct ice_aqc_add_update_free_vsi_resp *resp;
2527 struct ice_aqc_add_get_update_free_vsi *cmd;
2528 struct ice_aq_desc desc;
2529 enum ice_status status;
2531 cmd = &desc.params.vsi_cmd;
2532 resp = &desc.params.add_update_free_vsi_res;
2534 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2536 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* KEEP_ALLOC leaves the VSI allocation with this PF's resources
 * (set when keep_vsi_alloc requests it — guard condition elided here).
 */
2538 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2540 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh pool accounting (used/free VSI counts) from the FW response */
2542 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2543 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2551 * @hw: pointer to the HW struct
2552 * @vsi_ctx: pointer to a VSI context struct
2553 * @cd: pointer to command details structure or NULL
2555 * Update VSI context in the hardware (0x0211)
2558 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2559 struct ice_sq_cd *cd)
2561 struct ice_aqc_add_update_free_vsi_resp *resp;
2562 struct ice_aqc_add_get_update_free_vsi *cmd;
2563 struct ice_aq_desc desc;
2564 enum ice_status status;
2566 cmd = &desc.params.vsi_cmd;
2567 resp = &desc.params.add_update_free_vsi_res;
2569 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2571 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* New VSI parameters are sent in vsi_ctx->info -> set the RD flag */
2573 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2575 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2576 sizeof(vsi_ctx->info), cd);
/* Refresh pool accounting from the FW response */
2579 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2580 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2587 * ice_is_vsi_valid - check whether the VSI is valid or not
2588 * @hw: pointer to the HW struct
2589 * @vsi_handle: VSI handle
2591 * check whether the VSI is valid or not
2593 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2595 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2599 * ice_get_hw_vsi_num - return the HW VSI number
2600 * @hw: pointer to the HW struct
2601 * @vsi_handle: VSI handle
2603 * return the HW VSI number
2604 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2606 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2608 return hw->vsi_ctx[vsi_handle]->vsi_num;
2612 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2613 * @hw: pointer to the HW struct
2614 * @vsi_handle: VSI handle
2616 * return the VSI context entry for a given VSI handle
2618 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2620 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2624 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2625 * @hw: pointer to the HW struct
2626 * @vsi_handle: VSI handle
2627 * @vsi: VSI context pointer
2629 * save the VSI context entry for a given VSI handle
2632 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2634 hw->vsi_ctx[vsi_handle] = vsi;
2638 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2639 * @hw: pointer to the HW struct
2640 * @vsi_handle: VSI handle
2642 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2644 struct ice_vsi_ctx *vsi;
2647 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free the per-TC LAN queue context arrays and NULL the pointers so a
 * later clear/free of the VSI context does not double-free them.
 */
2650 ice_for_each_traffic_class(i) {
2651 if (vsi->lan_q_ctx[i]) {
2652 ice_free(hw, vsi->lan_q_ctx[i]);
2653 vsi->lan_q_ctx[i] = NULL;
2659 * ice_clear_vsi_ctx - clear the VSI context entry
2660 * @hw: pointer to the HW struct
2661 * @vsi_handle: VSI handle
2663 * clear the VSI context entry
2665 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2667 struct ice_vsi_ctx *vsi;
2669 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then drop the table entry so the
 * handle reads as invalid from ice_is_vsi_valid().
 */
2671 ice_clear_vsi_q_ctx(hw, vsi_handle);
2673 hw->vsi_ctx[vsi_handle] = NULL;
2678 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2679 * @hw: pointer to the HW struct
2681 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2685 for (i = 0; i < ICE_MAX_VSI; i++)
2686 ice_clear_vsi_ctx(hw, i);
2690 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2691 * @hw: pointer to the HW struct
2692 * @vsi_handle: unique VSI handle provided by drivers
2693 * @vsi_ctx: pointer to a VSI context struct
2694 * @cd: pointer to command details structure or NULL
2696 * Add a VSI context to the hardware also add it into the VSI handle list.
2697 * If this function gets called after reset for existing VSIs then update
2698 * with the new HW VSI number in the corresponding VSI handle list entry.
2701 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2702 struct ice_sq_cd *cd)
2704 struct ice_vsi_ctx *tmp_vsi_ctx;
2705 enum ice_status status;
2707 if (vsi_handle >= ICE_MAX_VSI)
2708 return ICE_ERR_PARAM;
2709 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2712 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2714 /* Create a new VSI context */
2715 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2716 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed after the AQ add succeeded: undo the HW add so
 * the VSI is not leaked in firmware.
 */
2718 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2719 return ICE_ERR_NO_MEMORY;
/* Save a private copy; the caller's vsi_ctx may be stack-allocated */
2721 *tmp_vsi_ctx = *vsi_ctx;
2723 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* Handle already existed (post-reset path): only the HW VSI number
 * needs refreshing in the saved entry.
 */
2725 /* update with new HW VSI num */
2726 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2733 * ice_free_vsi- free VSI context from hardware and VSI handle list
2734 * @hw: pointer to the HW struct
2735 * @vsi_handle: unique VSI handle
2736 * @vsi_ctx: pointer to a VSI context struct
2737 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2738 * @cd: pointer to command details structure or NULL
2740 * Free VSI context info from hardware as well as from VSI handle list
2743 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2744 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2746 enum ice_status status;
2748 if (!ice_is_vsi_valid(hw, vsi_handle))
2749 return ICE_ERR_PARAM;
/* Resolve the driver handle to the HW VSI number before the AQ call */
2750 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2751 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the saved context only if the HW free succeeded */
2753 ice_clear_vsi_ctx(hw, vsi_handle);
2759 * @hw: pointer to the HW struct
2760 * @vsi_handle: unique VSI handle
2761 * @vsi_ctx: pointer to a VSI context struct
2762 * @cd: pointer to command details structure or NULL
2764 * Update VSI context in the hardware
2767 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2768 struct ice_sq_cd *cd)
2770 if (!ice_is_vsi_valid(hw, vsi_handle))
2771 return ICE_ERR_PARAM;
2772 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2773 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2777 * ice_aq_get_vsi_params
2778 * @hw: pointer to the HW struct
2779 * @vsi_ctx: pointer to a VSI context struct
2780 * @cd: pointer to command details structure or NULL
2782 * Get VSI context info from hardware (0x0212)
2785 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2786 struct ice_sq_cd *cd)
2788 struct ice_aqc_add_get_update_free_vsi *cmd;
2789 struct ice_aqc_get_vsi_resp *resp;
2790 struct ice_aq_desc desc;
2791 enum ice_status status;
2793 cmd = &desc.params.vsi_cmd;
2794 resp = &desc.params.get_vsi_resp;
2796 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2798 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW writes the VSI parameters into vsi_ctx->info */
2800 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2801 sizeof(vsi_ctx->info), cd);
/* On success, extract the VSI number and pool accounting from the
 * descriptor response.
 */
2803 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2805 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2806 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2813 * ice_aq_add_update_mir_rule - add/update a mirror rule
2814 * @hw: pointer to the HW struct
2815 * @rule_type: Rule Type
2816 * @dest_vsi: VSI number to which packets will be mirrored
2817 * @count: length of the list
2818 * @mr_buf: buffer for list of mirrored VSI numbers
2819 * @cd: pointer to command details structure or NULL
2822 * Add/Update Mirror Rule (0x260).
2825 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2826 u16 count, struct ice_mir_rule_buf *mr_buf,
2827 struct ice_sq_cd *cd, u16 *rule_id)
2829 struct ice_aqc_add_update_mir_rule *cmd;
2830 struct ice_aq_desc desc;
2831 enum ice_status status;
2832 __le16 *mr_list = NULL;
/* Virtual-port rules carry a list of mirrored VSIs; physical-port
 * rules must not (validated per rule_type below).
 */
2835 switch (rule_type) {
2836 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2837 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2838 /* Make sure count and mr_buf are set for these rule_types */
2839 if (!(count && mr_buf))
2840 return ICE_ERR_PARAM;
2842 buf_size = count * sizeof(__le16);
2843 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2845 return ICE_ERR_NO_MEMORY;
2847 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2848 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2849 /* Make sure count and mr_buf are not set for these
2852 if (count || mr_buf)
2853 return ICE_ERR_PARAM;
2856 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2857 return ICE_ERR_OUT_OF_RANGE;
2860 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2862 /* Pre-process 'mr_buf' items for add/update of virtual port
2863 * ingress/egress mirroring (but not physical port ingress/egress
2869 for (i = 0; i < count; i++) {
2872 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2874 /* Validate specified VSI number, make sure it is less
2875 * than ICE_MAX_VSI, if not return with error.
2877 if (id >= ICE_MAX_VSI) {
2878 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2880 ice_free(hw, mr_list);
2881 return ICE_ERR_OUT_OF_RANGE;
2884 /* add VSI to mirror rule */
2887 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2888 else /* remove VSI from mirror rule */
2889 mr_list[i] = CPU_TO_LE16(id);
2893 cmd = &desc.params.add_update_rule;
/* If the caller passed a valid rule_id, this is an update of an
 * existing rule rather than a new allocation.
 */
2894 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2895 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2896 ICE_AQC_RULE_ID_VALID_M);
2897 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2898 cmd->num_entries = CPU_TO_LE16(count);
2899 cmd->dest = CPU_TO_LE16(dest_vsi);
2901 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* FW returns the (possibly newly assigned) rule ID in the descriptor */
2903 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
/* ice_free(NULL) is safe for the physical-port paths */
2905 ice_free(hw, mr_list);
2911 * ice_aq_delete_mir_rule - delete a mirror rule
2912 * @hw: pointer to the HW struct
2913 * @rule_id: Mirror rule ID (to be deleted)
2914 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2915 * otherwise it is returned to the shared pool
2916 * @cd: pointer to command details structure or NULL
2918 * Delete Mirror Rule (0x261).
2921 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2922 struct ice_sq_cd *cd)
2924 struct ice_aqc_delete_mir_rule *cmd;
2925 struct ice_aq_desc desc;
2927 /* rule_id should be in the range 0...63 */
2928 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2929 return ICE_ERR_OUT_OF_RANGE;
2931 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2933 cmd = &desc.params.del_rule;
2934 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2935 cmd->rule_id = CPU_TO_LE16(rule_id);
/* KEEP_ALLOCD keeps the freed resource with this PF instead of
 * returning it to the shared pool (applied based on keep_allocd).
 */
2938 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2940 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2944 * ice_aq_alloc_free_vsi_list
2945 * @hw: pointer to the HW struct
2946 * @vsi_list_id: VSI list ID returned or used for lookup
2947 * @lkup_type: switch rule filter lookup type
2948 * @opc: switch rules population command type - pass in the command opcode
2950 * allocates or free a VSI list resource
2952 static enum ice_status
2953 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2954 enum ice_sw_lkup_type lkup_type,
2955 enum ice_adminq_opc opc)
2957 struct ice_aqc_alloc_free_res_elem *sw_buf;
2958 struct ice_aqc_res_elem *vsi_ele;
2959 enum ice_status status;
2962 buf_len = ice_struct_size(sw_buf, elem, 1);
2963 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2965 return ICE_ERR_NO_MEMORY;
2966 sw_buf->num_elems = CPU_TO_LE16(1);
/* Map the lookup type to the resource type: VLAN lookups use a prune
 * list, all other supported lookups use a replication list.
 */
2968 if (lkup_type == ICE_SW_LKUP_MAC ||
2969 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2970 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2971 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2972 lkup_type == ICE_SW_LKUP_PROMISC ||
2973 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2974 lkup_type == ICE_SW_LKUP_LAST) {
2975 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2976 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2978 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2980 status = ICE_ERR_PARAM;
2981 goto ice_aq_alloc_free_vsi_list_exit;
/* Freeing requires the caller-supplied list ID in the element */
2984 if (opc == ice_aqc_opc_free_res)
2985 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2987 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2989 goto ice_aq_alloc_free_vsi_list_exit;
/* Allocation returns the new list ID in the same element */
2991 if (opc == ice_aqc_opc_alloc_res) {
2992 vsi_ele = &sw_buf->elem[0];
2993 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2996 ice_aq_alloc_free_vsi_list_exit:
2997 ice_free(hw, sw_buf);
3002 * ice_aq_set_storm_ctrl - Sets storm control configuration
3003 * @hw: pointer to the HW struct
3004 * @bcast_thresh: represents the upper threshold for broadcast storm control
3005 * @mcast_thresh: represents the upper threshold for multicast storm control
3006 * @ctl_bitmask: storm control knobs
3008 * Sets the storm control configuration (0x0280)
3011 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3014 struct ice_aqc_storm_cfg *cmd;
3015 struct ice_aq_desc desc;
3017 cmd = &desc.params.storm_conf;
3019 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the HW-supported field width */
3021 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3022 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3023 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3025 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3029 * ice_aq_get_storm_ctrl - gets storm control configuration
3030 * @hw: pointer to the HW struct
3031 * @bcast_thresh: represents the upper threshold for broadcast storm control
3032 * @mcast_thresh: represents the upper threshold for multicast storm control
3033 * @ctl_bitmask: storm control knobs
3035 * Gets the storm control configuration (0x0281)
3038 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3041 enum ice_status status;
3042 struct ice_aq_desc desc;
3044 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3046 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3048 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* Output pointers are filled only on success; thresholds are masked
 * to the valid field width before being reported.
 */
3051 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3054 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3057 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3064 * ice_aq_sw_rules - add/update/remove switch rules
3065 * @hw: pointer to the HW struct
3066 * @rule_list: pointer to switch rule population list
3067 * @rule_list_sz: total size of the rule list in bytes
3068 * @num_rules: number of switch rules in the rule_list
3069 * @opc: switch rules population command type - pass in the command opcode
3070 * @cd: pointer to command details structure or NULL
3072 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3074 static enum ice_status
3075 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3076 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3078 struct ice_aq_desc desc;
3079 enum ice_status status;
3081 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three switch-rule opcodes are valid for this helper */
3083 if (opc != ice_aqc_opc_add_sw_rules &&
3084 opc != ice_aqc_opc_update_sw_rules &&
3085 opc != ice_aqc_opc_remove_sw_rules)
3086 return ICE_ERR_PARAM;
3088 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3090 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3091 desc.params.sw_rules.num_rules_fltr_entry_index =
3092 CPU_TO_LE16(num_rules);
3093 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Update/remove of a rule FW does not know about maps to a distinct
 * "does not exist" error so callers can tell it from other failures.
 */
3094 if (opc != ice_aqc_opc_add_sw_rules &&
3095 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3096 status = ICE_ERR_DOES_NOT_EXIST;
3102 * ice_aq_add_recipe - add switch recipe
3103 * @hw: pointer to the HW struct
3104 * @s_recipe_list: pointer to switch rule population list
3105 * @num_recipes: number of switch recipes in the list
3106 * @cd: pointer to command details structure or NULL
3111 ice_aq_add_recipe(struct ice_hw *hw,
3112 struct ice_aqc_recipe_data_elem *s_recipe_list,
3113 u16 num_recipes, struct ice_sq_cd *cd)
3115 struct ice_aqc_add_get_recipe *cmd;
3116 struct ice_aq_desc desc;
3119 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3120 cmd = &desc.params.add_get_recipe;
3121 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3123 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* The recipe list buffer is sent to FW -> RD flag required */
3124 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3126 buf_size = num_recipes * sizeof(*s_recipe_list);
3128 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3132 * ice_aq_get_recipe - get switch recipe
3133 * @hw: pointer to the HW struct
3134 * @s_recipe_list: pointer to switch rule population list
3135 * @num_recipes: pointer to the number of recipes (input and output)
3136 * @recipe_root: root recipe number of recipe(s) to retrieve
3137 * @cd: pointer to command details structure or NULL
3141 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3142 * On output, *num_recipes will equal the number of entries returned in
3145 * The caller must supply enough space in s_recipe_list to hold all possible
3146 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3149 ice_aq_get_recipe(struct ice_hw *hw,
3150 struct ice_aqc_recipe_data_elem *s_recipe_list,
3151 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3153 struct ice_aqc_add_get_recipe *cmd;
3154 struct ice_aq_desc desc;
3155 enum ice_status status;
/* Caller must size s_recipe_list for the worst case (see header) */
3158 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3159 return ICE_ERR_PARAM;
3161 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3162 cmd = &desc.params.add_get_recipe;
3163 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3165 cmd->return_index = CPU_TO_LE16(recipe_root);
3166 cmd->num_sub_recipes = 0;
3168 buf_size = *num_recipes * sizeof(*s_recipe_list);
3170 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* FW reports how many recipe entries it actually returned */
3171 /* cppcheck-suppress constArgument */
3172 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3178 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3179 * @hw: pointer to the HW struct
3180 * @profile_id: package profile ID to associate the recipe with
3181 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3182 * @cd: pointer to command details structure or NULL
3183 * Recipe to profile association (0x0291)
3186 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3187 struct ice_sq_cd *cd)
3189 struct ice_aqc_recipe_to_profile *cmd;
3190 struct ice_aq_desc desc;
3192 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3193 cmd = &desc.params.recipe_to_profile;
3194 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* profile_id is truncated to 16 bits for the wire format */
3195 cmd->profile_id = CPU_TO_LE16(profile_id);
3196 /* Set the recipe ID bit in the bitmask to let the device know which
3197 * profile we are associating the recipe to
3199 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3200 ICE_NONDMA_TO_NONDMA);
3202 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3206 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3207 * @hw: pointer to the HW struct
3208 * @profile_id: package profile ID to associate the recipe with
3209 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3210 * @cd: pointer to command details structure or NULL
3211 * Associate profile ID with given recipe (0x0293)
3214 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3215 struct ice_sq_cd *cd)
3217 struct ice_aqc_recipe_to_profile *cmd;
3218 struct ice_aq_desc desc;
3219 enum ice_status status;
3221 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3222 cmd = &desc.params.recipe_to_profile;
3223 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3224 cmd->profile_id = CPU_TO_LE16(profile_id);
3226 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* On success, copy the recipe association bitmap back to the caller */
3228 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3229 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3235 * ice_alloc_recipe - add recipe resource
3236 * @hw: pointer to the hardware structure
3237 * @rid: recipe ID returned as response to AQ call
3239 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3241 struct ice_aqc_alloc_free_res_elem *sw_buf;
3242 enum ice_status status;
3245 buf_len = ice_struct_size(sw_buf, elem, 1);
3246 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3248 return ICE_ERR_NO_MEMORY;
3250 sw_buf->num_elems = CPU_TO_LE16(1);
/* Recipes are requested as a shared resource type */
3251 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3252 ICE_AQC_RES_TYPE_S) |
3253 ICE_AQC_RES_TYPE_FLAG_SHARED);
3254 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3255 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the element's response field */
3257 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3258 ice_free(hw, sw_buf);
3263 /* ice_init_port_info - Initialize port_info with switch configuration data
3264 * @pi: pointer to port_info
3265 * @vsi_port_num: VSI number or port number
3266 * @type: Type of switch element (port or VSI)
3267 * @swid: switch ID of the switch the element is attached to
3268 * @pf_vf_num: PF or VF number
3269 * @is_vf: true if the element is a VF, false otherwise
3272 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3273 u16 swid, u16 pf_vf_num, bool is_vf)
3276 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3277 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3279 pi->pf_vf_num = pf_vf_num;
/* No default Tx/Rx VSI is assigned at init time */
3281 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3282 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* Any element type other than a physical port is unexpected here */
3285 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3290 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3291 * @hw: pointer to the hardware structure
3293 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3295 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3296 enum ice_status status;
/* Single-port assumption for this function (see j-based check below) */
3303 num_total_ports = 1;
3305 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3306 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3309 return ICE_ERR_NO_MEMORY;
3311 /* Multiple calls to ice_aq_get_sw_cfg may be required
3312 * to get all the switch configuration information. The need
3313 * for additional calls is indicated by ice_aq_get_sw_cfg
3314 * writing a non-zero value in req_desc
3317 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3319 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3320 &req_desc, &num_elems, NULL);
3325 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3326 u16 pf_vf_num, swid, vsi_port_num;
/* Each element packs number, owner and type into bit fields */
3330 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3331 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3333 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3334 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3336 swid = LE16_TO_CPU(ele->swid);
3338 if (LE16_TO_CPU(ele->pf_vf_num) &
3339 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3342 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3343 ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
3346 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3347 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* Guard against FW reporting more ports than this function expects */
3348 if (j == num_total_ports) {
3349 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3350 status = ICE_ERR_CFG;
3353 ice_init_port_info(hw->port_info,
3354 vsi_port_num, res_type, swid,
/* Loop until FW stops requesting continuation or an error occurs */
3362 } while (req_desc && !status);
3370 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3371 * @hw: pointer to the hardware structure
3372 * @fi: filter info structure to fill/update
3374 * This helper function populates the lb_en and lan_en elements of the provided
3375 * ice_fltr_info struct using the switch's type and characteristics of the
3376 * switch rule being configured.
3378 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Rx default-port forwarding rules are handled first */
3380 if ((fi->flag & ICE_FLTR_RX) &&
3381 (fi->fltr_act == ICE_FWD_TO_VSI ||
3382 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3383 fi->lkup_type == ICE_SW_LKUP_LAST)
3387 if ((fi->flag & ICE_FLTR_TX) &&
3388 (fi->fltr_act == ICE_FWD_TO_VSI ||
3389 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3390 fi->fltr_act == ICE_FWD_TO_Q ||
3391 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3392 /* Setting LB for prune actions will result in replicated
3393 * packets to the internal switch that will be dropped.
3395 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3398 /* Set lan_en to TRUE if
3399 * 1. The switch is a VEB AND
3401 * 2.1 The lookup is a directional lookup like ethertype,
3402 * promiscuous, ethertype-MAC, promiscuous-VLAN
3403 * and default-port OR
3404 * 2.2 The lookup is VLAN, OR
3405 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3406 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3410 * The switch is a VEPA.
3412 * In all other cases, the LAN enable has to be set to false.
3415 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3416 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3417 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3418 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3419 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3420 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3421 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3422 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3423 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3424 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3433 * ice_fill_sw_rule - Helper function to fill switch rule structure
3434 * @hw: pointer to the hardware structure
3435 * @f_info: entry containing packet forwarding information
3436 * @s_rule: switch rule structure to be filled in based on mac_entry
3437 * @opc: switch rules population command type - pass in the command opcode
3440 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3441 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
3443 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* For a remove, only the rule index is needed — no header or action */
3451 if (opc == ice_aqc_opc_remove_sw_rules) {
3452 s_rule->pdata.lkup_tx_rx.act = 0;
3453 s_rule->pdata.lkup_tx_rx.index =
3454 CPU_TO_LE16(f_info->fltr_rule_id);
3455 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3459 eth_hdr_sz = sizeof(dummy_eth_header);
3460 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3462 /* initialize the ether header with a dummy header */
3463 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3464 ice_fill_sw_info(hw, f_info);
/* Build the 32-bit action word from the forwarding action */
3466 switch (f_info->fltr_act) {
3467 case ICE_FWD_TO_VSI:
3468 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3469 ICE_SINGLE_ACT_VSI_ID_M;
3470 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3471 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3472 ICE_SINGLE_ACT_VALID_BIT;
3474 case ICE_FWD_TO_VSI_LIST:
3475 act |= ICE_SINGLE_ACT_VSI_LIST;
3476 act |= (f_info->fwd_id.vsi_list_id <<
3477 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3478 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3479 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3480 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3481 ICE_SINGLE_ACT_VALID_BIT;
3484 act |= ICE_SINGLE_ACT_TO_Q;
3485 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3486 ICE_SINGLE_ACT_Q_INDEX_M;
3488 case ICE_DROP_PACKET:
3489 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3490 ICE_SINGLE_ACT_VALID_BIT;
3492 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size */
3493 q_rgn = f_info->qgrp_size > 0 ?
3494 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3495 act |= ICE_SINGLE_ACT_TO_Q;
3496 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3497 ICE_SINGLE_ACT_Q_INDEX_M;
3498 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3499 ICE_SINGLE_ACT_Q_REGION_M;
/* lb_en/lan_en were decided by ice_fill_sw_info() above */
3506 act |= ICE_SINGLE_ACT_LB_ENABLE;
3508 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick which fields of the dummy header get patched per lookup type */
3510 switch (f_info->lkup_type) {
3511 case ICE_SW_LKUP_MAC:
3512 daddr = f_info->l_data.mac.mac_addr;
3514 case ICE_SW_LKUP_VLAN:
3515 vlan_id = f_info->l_data.vlan.vlan_id;
3516 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3517 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3518 act |= ICE_SINGLE_ACT_PRUNE;
3519 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3522 case ICE_SW_LKUP_ETHERTYPE_MAC:
3523 daddr = f_info->l_data.ethertype_mac.mac_addr;
3525 case ICE_SW_LKUP_ETHERTYPE:
3526 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3527 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3529 case ICE_SW_LKUP_MAC_VLAN:
3530 daddr = f_info->l_data.mac_vlan.mac_addr;
3531 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3533 case ICE_SW_LKUP_PROMISC_VLAN:
3534 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3536 case ICE_SW_LKUP_PROMISC:
3537 daddr = f_info->l_data.mac_vlan.mac_addr;
3543 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3544 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3545 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3547 /* Recipe set depending on lookup type */
3548 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3549 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3550 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Patch destination MAC into the dummy header when one was selected */
3553 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3554 ICE_NONDMA_TO_NONDMA);
/* Program the VLAN TCI only when a valid (<= 0xFFF) VLAN ID was set */
3556 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3557 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3558 *off = CPU_TO_BE16(vlan_id);
3561 /* Create the switch rule with the final dummy Ethernet header */
3562 if (opc != ice_aqc_opc_update_sw_rules)
3563 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3567 * ice_add_marker_act
3568 * @hw: pointer to the hardware structure
3569 * @m_ent: the management entry for which sw marker needs to be added
3570 * @sw_marker: sw marker to tag the Rx descriptor with
3571 * @l_id: large action resource ID
3573 * Create a large action to hold software marker and update the switch rule
3574 * entry pointed by m_ent with newly created large action
3576 static enum ice_status
3577 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3578 u16 sw_marker, u16 l_id)
3580 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3581 /* For software marker we need 3 large actions
3582 * 1. FWD action: FWD TO VSI or VSI LIST
3583 * 2. GENERIC VALUE action to hold the profile ID
3584 * 3. GENERIC VALUE action to hold the software marker ID
3586 const u16 num_lg_acts = 3;
3587 enum ice_status status;
3593 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3594 return ICE_ERR_PARAM;
3596 /* Create two back-to-back switch rules and submit them to the HW using
3597 * one memory buffer:
3601 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3602 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3603 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3605 return ICE_ERR_NO_MEMORY;
3607 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3609 /* Fill in the first switch rule i.e. large action */
3610 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3611 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3612 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3614 /* First action VSI forwarding or VSI list forwarding depending on how
3617 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3618 m_ent->fltr_info.fwd_id.hw_vsi_id;
3620 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3621 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3622 if (m_ent->vsi_count > 1)
3623 act |= ICE_LG_ACT_VSI_LIST;
3624 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3626 /* Second action descriptor type */
3627 act = ICE_LG_ACT_GENERIC;
3629 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3630 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3632 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3633 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3635 /* Third action Marker value */
3636 act |= ICE_LG_ACT_GENERIC;
3637 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3638 ICE_LG_ACT_GENERIC_VALUE_M;
3640 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3642 /* call the fill switch rule to fill the lookup Tx Rx structure */
3643 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3644 ice_aqc_opc_update_sw_rules);
3646 /* Update the action to point to the large action ID */
3647 rx_tx->pdata.lkup_tx_rx.act =
3648 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3649 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3650 ICE_SINGLE_ACT_PTR_VAL_M));
3652 /* Use the filter rule ID of the previously created rule with single
3653 * act. Once the update happens, hardware will treat this as large
3656 rx_tx->pdata.lkup_tx_rx.index =
3657 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3659 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3660 ice_aqc_opc_update_sw_rules, NULL);
3662 m_ent->lg_act_idx = l_id;
3663 m_ent->sw_marker_id = sw_marker;
3666 ice_free(hw, lg_act);
3671 * ice_add_counter_act - add/update filter rule with counter action
3672 * @hw: pointer to the hardware structure
3673 * @m_ent: the management entry for which counter needs to be added
3674 * @counter_id: VLAN counter ID returned as part of allocate resource
3675 * @l_id: large action resource ID
3677 static enum ice_status
3678 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3679 u16 counter_id, u16 l_id)
3681 struct ice_aqc_sw_rules_elem *lg_act;
3682 struct ice_aqc_sw_rules_elem *rx_tx;
3683 enum ice_status status;
3684 /* 2 actions will be added while adding a large action counter */
3685 const int num_acts = 2;
3692 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3693 return ICE_ERR_PARAM;
3695 /* Create two back-to-back switch rules and submit them to the HW using
3696 * one memory buffer:
3700 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3701 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3702 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3704 return ICE_ERR_NO_MEMORY;
3706 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3708 /* Fill in the first switch rule i.e. large action */
3709 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3710 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3711 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3713 /* First action VSI forwarding or VSI list forwarding depending on how
3716 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3717 m_ent->fltr_info.fwd_id.hw_vsi_id;
3719 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3720 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3721 ICE_LG_ACT_VSI_LIST_ID_M;
3722 if (m_ent->vsi_count > 1)
3723 act |= ICE_LG_ACT_VSI_LIST;
3724 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3726 /* Second action counter ID */
3727 act = ICE_LG_ACT_STAT_COUNT;
3728 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3729 ICE_LG_ACT_STAT_COUNT_M;
3730 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3732 /* call the fill switch rule to fill the lookup Tx Rx structure */
3733 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3734 ice_aqc_opc_update_sw_rules);
3736 act = ICE_SINGLE_ACT_PTR;
3737 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3738 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3740 /* Use the filter rule ID of the previously created rule with single
3741 * act. Once the update happens, hardware will treat this as large
3744 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3745 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3747 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3748 ice_aqc_opc_update_sw_rules, NULL);
3750 m_ent->lg_act_idx = l_id;
3751 m_ent->counter_index = counter_id;
3754 ice_free(hw, lg_act);
3759 * ice_create_vsi_list_map
3760 * @hw: pointer to the hardware structure
3761 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3762 * @num_vsi: number of VSI handles in the array
3763 * @vsi_list_id: VSI list ID generated as part of allocate resource
3765 * Helper function to create a new entry of VSI list ID to VSI mapping
3766 * using the given VSI list ID
3768 static struct ice_vsi_list_map_info *
3769 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3772 struct ice_switch_info *sw = hw->switch_info;
3773 struct ice_vsi_list_map_info *v_map;
3776 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3780 v_map->vsi_list_id = vsi_list_id;
3782 for (i = 0; i < num_vsi; i++)
3783 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3785 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3790 * ice_update_vsi_list_rule
3791 * @hw: pointer to the hardware structure
3792 * @vsi_handle_arr: array of VSI handles to form a VSI list
3793 * @num_vsi: number of VSI handles in the array
3794 * @vsi_list_id: VSI list ID generated as part of allocate resource
3795 * @remove: Boolean value to indicate if this is a remove action
3796 * @opc: switch rules population command type - pass in the command opcode
3797 * @lkup_type: lookup type of the filter
3799 * Call AQ command to add a new switch rule or update existing switch rule
3800 * using the given VSI list ID
3802 static enum ice_status
3803 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3804 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3805 enum ice_sw_lkup_type lkup_type)
3807 struct ice_aqc_sw_rules_elem *s_rule;
3808 enum ice_status status;
3814 return ICE_ERR_PARAM;
3816 if (lkup_type == ICE_SW_LKUP_MAC ||
3817 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3818 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3819 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3820 lkup_type == ICE_SW_LKUP_PROMISC ||
3821 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3822 lkup_type == ICE_SW_LKUP_LAST)
3823 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3824 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3825 else if (lkup_type == ICE_SW_LKUP_VLAN)
3826 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3827 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3829 return ICE_ERR_PARAM;
3831 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3832 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3834 return ICE_ERR_NO_MEMORY;
3835 for (i = 0; i < num_vsi; i++) {
3836 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3837 status = ICE_ERR_PARAM;
3840 /* AQ call requires hw_vsi_id(s) */
3841 s_rule->pdata.vsi_list.vsi[i] =
3842 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3845 s_rule->type = CPU_TO_LE16(rule_type);
3846 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3847 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3849 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3852 ice_free(hw, s_rule);
3857 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3858 * @hw: pointer to the HW struct
3859 * @vsi_handle_arr: array of VSI handles to form a VSI list
3860 * @num_vsi: number of VSI handles in the array
3861 * @vsi_list_id: stores the ID of the VSI list to be created
3862 * @lkup_type: switch rule filter's lookup type
3864 static enum ice_status
3865 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3866 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3868 enum ice_status status;
3870 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3871 ice_aqc_opc_alloc_res);
3875 /* Update the newly created VSI list to include the specified VSIs */
3876 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3877 *vsi_list_id, false,
3878 ice_aqc_opc_add_sw_rules, lkup_type);
3882 * ice_create_pkt_fwd_rule
3883 * @hw: pointer to the hardware structure
3884 * @recp_list: corresponding filter management list
3885 * @f_entry: entry containing packet forwarding information
3887 * Create switch rule with given filter information and add an entry
3888 * to the corresponding filter management list to track this switch rule
3891 static enum ice_status
3892 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3893 struct ice_fltr_list_entry *f_entry)
3895 struct ice_fltr_mgmt_list_entry *fm_entry;
3896 struct ice_aqc_sw_rules_elem *s_rule;
3897 enum ice_status status;
3899 s_rule = (struct ice_aqc_sw_rules_elem *)
3900 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3902 return ICE_ERR_NO_MEMORY;
3903 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3904 ice_malloc(hw, sizeof(*fm_entry));
3906 status = ICE_ERR_NO_MEMORY;
3907 goto ice_create_pkt_fwd_rule_exit;
3910 fm_entry->fltr_info = f_entry->fltr_info;
3912 /* Initialize all the fields for the management entry */
3913 fm_entry->vsi_count = 1;
3914 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3915 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3916 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3918 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3919 ice_aqc_opc_add_sw_rules);
3921 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3922 ice_aqc_opc_add_sw_rules, NULL);
3924 ice_free(hw, fm_entry);
3925 goto ice_create_pkt_fwd_rule_exit;
3928 f_entry->fltr_info.fltr_rule_id =
3929 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3930 fm_entry->fltr_info.fltr_rule_id =
3931 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3933 /* The book keeping entries will get removed when base driver
3934 * calls remove filter AQ command
3936 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3938 ice_create_pkt_fwd_rule_exit:
3939 ice_free(hw, s_rule);
3944 * ice_update_pkt_fwd_rule
3945 * @hw: pointer to the hardware structure
3946 * @f_info: filter information for switch rule
3948 * Call AQ command to update a previously created switch rule with a
3951 static enum ice_status
3952 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3954 struct ice_aqc_sw_rules_elem *s_rule;
3955 enum ice_status status;
3957 s_rule = (struct ice_aqc_sw_rules_elem *)
3958 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3960 return ICE_ERR_NO_MEMORY;
3962 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3964 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3966 /* Update switch rule with new rule set to forward VSI list */
3967 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3968 ice_aqc_opc_update_sw_rules, NULL);
3970 ice_free(hw, s_rule);
3975 * ice_update_sw_rule_bridge_mode
3976 * @hw: pointer to the HW struct
3978 * Updates unicast switch filter rules based on VEB/VEPA mode
3980 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3982 struct ice_switch_info *sw = hw->switch_info;
3983 struct ice_fltr_mgmt_list_entry *fm_entry;
3984 enum ice_status status = ICE_SUCCESS;
3985 struct LIST_HEAD_TYPE *rule_head;
3986 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3988 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3989 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3991 ice_acquire_lock(rule_lock);
3992 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3994 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3995 u8 *addr = fi->l_data.mac.mac_addr;
3997 /* Update unicast Tx rules to reflect the selected
4000 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4001 (fi->fltr_act == ICE_FWD_TO_VSI ||
4002 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4003 fi->fltr_act == ICE_FWD_TO_Q ||
4004 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4005 status = ice_update_pkt_fwd_rule(hw, fi);
4011 ice_release_lock(rule_lock);
4017 * ice_add_update_vsi_list
4018 * @hw: pointer to the hardware structure
4019 * @m_entry: pointer to current filter management list entry
4020 * @cur_fltr: filter information from the book keeping entry
4021 * @new_fltr: filter information with the new VSI to be added
4023 * Call AQ command to add or update previously created VSI list with new VSI.
4025 * Helper function to do book keeping associated with adding filter information
4026 * The algorithm to do the book keeping is described below :
4027 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4028 * if only one VSI has been added till now
4029 * Allocate a new VSI list and add two VSIs
4030 * to this list using switch rule command
4031 * Update the previously created switch rule with the
4032 * newly created VSI list ID
4033 * if a VSI list was previously created
4034 * Add the new VSI to the previously created VSI list set
4035 * using the update switch rule command
4037 static enum ice_status
4038 ice_add_update_vsi_list(struct ice_hw *hw,
4039 struct ice_fltr_mgmt_list_entry *m_entry,
4040 struct ice_fltr_info *cur_fltr,
4041 struct ice_fltr_info *new_fltr)
4043 enum ice_status status = ICE_SUCCESS;
4044 u16 vsi_list_id = 0;
4046 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4047 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4048 return ICE_ERR_NOT_IMPL;
4050 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4051 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4052 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4053 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4054 return ICE_ERR_NOT_IMPL;
4056 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4057 /* Only one entry existed in the mapping and it was not already
4058 * a part of a VSI list. So, create a VSI list with the old and
4061 struct ice_fltr_info tmp_fltr;
4062 u16 vsi_handle_arr[2];
4064 /* A rule already exists with the new VSI being added */
4065 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4066 return ICE_ERR_ALREADY_EXISTS;
4068 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4069 vsi_handle_arr[1] = new_fltr->vsi_handle;
4070 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4072 new_fltr->lkup_type);
4076 tmp_fltr = *new_fltr;
4077 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4078 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4079 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4080 /* Update the previous switch rule of "MAC forward to VSI" to
4081 * "MAC fwd to VSI list"
4083 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4087 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4088 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4089 m_entry->vsi_list_info =
4090 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4093 if (!m_entry->vsi_list_info)
4094 return ICE_ERR_NO_MEMORY;
4096 /* If this entry was large action then the large action needs
4097 * to be updated to point to FWD to VSI list
4099 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4101 ice_add_marker_act(hw, m_entry,
4102 m_entry->sw_marker_id,
4103 m_entry->lg_act_idx);
4105 u16 vsi_handle = new_fltr->vsi_handle;
4106 enum ice_adminq_opc opcode;
4108 if (!m_entry->vsi_list_info)
4111 /* A rule already exists with the new VSI being added */
4112 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4115 /* Update the previously created VSI list set with
4116 * the new VSI ID passed in
4118 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4119 opcode = ice_aqc_opc_update_sw_rules;
4121 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4122 vsi_list_id, false, opcode,
4123 new_fltr->lkup_type);
4124 /* update VSI list mapping info with new VSI ID */
4126 ice_set_bit(vsi_handle,
4127 m_entry->vsi_list_info->vsi_map);
4130 m_entry->vsi_count++;
4135 * ice_find_rule_entry - Search a rule entry
4136 * @list_head: head of rule list
4137 * @f_info: rule information
4139 * Helper function to search for a given rule entry
4140 * Returns pointer to entry storing the rule if found
4142 static struct ice_fltr_mgmt_list_entry *
4143 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4144 struct ice_fltr_info *f_info)
4146 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4148 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4150 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4151 sizeof(f_info->l_data)) &&
4152 f_info->flag == list_itr->fltr_info.flag) {
4161 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4162 * @recp_list: VSI lists needs to be searched
4163 * @vsi_handle: VSI handle to be found in VSI list
4164 * @vsi_list_id: VSI list ID found containing vsi_handle
4166 * Helper function to search a VSI list with single entry containing given VSI
4167 * handle element. This can be extended further to search VSI list with more
4168 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4170 static struct ice_vsi_list_map_info *
4171 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4174 struct ice_vsi_list_map_info *map_info = NULL;
4175 struct LIST_HEAD_TYPE *list_head;
4177 list_head = &recp_list->filt_rules;
4178 if (recp_list->adv_rule) {
4179 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4181 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4182 ice_adv_fltr_mgmt_list_entry,
4184 if (list_itr->vsi_list_info) {
4185 map_info = list_itr->vsi_list_info;
4186 if (ice_is_bit_set(map_info->vsi_map,
4188 *vsi_list_id = map_info->vsi_list_id;
4194 struct ice_fltr_mgmt_list_entry *list_itr;
4196 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4197 ice_fltr_mgmt_list_entry,
4199 if (list_itr->vsi_count == 1 &&
4200 list_itr->vsi_list_info) {
4201 map_info = list_itr->vsi_list_info;
4202 if (ice_is_bit_set(map_info->vsi_map,
4204 *vsi_list_id = map_info->vsi_list_id;
4214 * ice_add_rule_internal - add rule for a given lookup type
4215 * @hw: pointer to the hardware structure
4216 * @recp_list: recipe list for which rule has to be added
4217 * @lport: logic port number on which function add rule
4218 * @f_entry: structure containing MAC forwarding information
4220 * Adds or updates the rule lists for a given recipe
4222 static enum ice_status
4223 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4224 u8 lport, struct ice_fltr_list_entry *f_entry)
4226 struct ice_fltr_info *new_fltr, *cur_fltr;
4227 struct ice_fltr_mgmt_list_entry *m_entry;
4228 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4229 enum ice_status status = ICE_SUCCESS;
4231 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4232 return ICE_ERR_PARAM;
4234 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4235 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4236 f_entry->fltr_info.fwd_id.hw_vsi_id =
4237 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4239 rule_lock = &recp_list->filt_rule_lock;
4241 ice_acquire_lock(rule_lock);
4242 new_fltr = &f_entry->fltr_info;
4243 if (new_fltr->flag & ICE_FLTR_RX)
4244 new_fltr->src = lport;
4245 else if (new_fltr->flag & ICE_FLTR_TX)
4247 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4249 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4251 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4252 goto exit_add_rule_internal;
4255 cur_fltr = &m_entry->fltr_info;
4256 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4258 exit_add_rule_internal:
4259 ice_release_lock(rule_lock);
4264 * ice_remove_vsi_list_rule
4265 * @hw: pointer to the hardware structure
4266 * @vsi_list_id: VSI list ID generated as part of allocate resource
4267 * @lkup_type: switch rule filter lookup type
4269 * The VSI list should be emptied before this function is called to remove the
4272 static enum ice_status
4273 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4274 enum ice_sw_lkup_type lkup_type)
4276 /* Free the vsi_list resource that we allocated. It is assumed that the
4277 * list is empty at this point.
4279 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4280 ice_aqc_opc_free_res);
4284 * ice_rem_update_vsi_list
4285 * @hw: pointer to the hardware structure
4286 * @vsi_handle: VSI handle of the VSI to remove
4287 * @fm_list: filter management entry for which the VSI list management needs to
4290 static enum ice_status
4291 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4292 struct ice_fltr_mgmt_list_entry *fm_list)
4294 enum ice_sw_lkup_type lkup_type;
4295 enum ice_status status = ICE_SUCCESS;
4298 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4299 fm_list->vsi_count == 0)
4300 return ICE_ERR_PARAM;
4302 /* A rule with the VSI being removed does not exist */
4303 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4304 return ICE_ERR_DOES_NOT_EXIST;
4306 lkup_type = fm_list->fltr_info.lkup_type;
4307 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4308 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4309 ice_aqc_opc_update_sw_rules,
4314 fm_list->vsi_count--;
4315 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4317 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4318 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4319 struct ice_vsi_list_map_info *vsi_list_info =
4320 fm_list->vsi_list_info;
4323 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4325 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4326 return ICE_ERR_OUT_OF_RANGE;
4328 /* Make sure VSI list is empty before removing it below */
4329 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4331 ice_aqc_opc_update_sw_rules,
4336 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4337 tmp_fltr_info.fwd_id.hw_vsi_id =
4338 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4339 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4340 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4342 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4343 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4347 fm_list->fltr_info = tmp_fltr_info;
4350 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4351 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4352 struct ice_vsi_list_map_info *vsi_list_info =
4353 fm_list->vsi_list_info;
4355 /* Remove the VSI list since it is no longer used */
4356 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4358 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4359 vsi_list_id, status);
4363 LIST_DEL(&vsi_list_info->list_entry);
4364 ice_free(hw, vsi_list_info);
4365 fm_list->vsi_list_info = NULL;
4372 * ice_remove_rule_internal - Remove a filter rule of a given type
4374 * @hw: pointer to the hardware structure
4375 * @recp_list: recipe list for which the rule needs to removed
4376 * @f_entry: rule entry containing filter information
4378 static enum ice_status
4379 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4380 struct ice_fltr_list_entry *f_entry)
4382 struct ice_fltr_mgmt_list_entry *list_elem;
4383 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4384 enum ice_status status = ICE_SUCCESS;
4385 bool remove_rule = false;
4388 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4389 return ICE_ERR_PARAM;
4390 f_entry->fltr_info.fwd_id.hw_vsi_id =
4391 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4393 rule_lock = &recp_list->filt_rule_lock;
4394 ice_acquire_lock(rule_lock);
4395 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4396 &f_entry->fltr_info);
4398 status = ICE_ERR_DOES_NOT_EXIST;
4402 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4404 } else if (!list_elem->vsi_list_info) {
4405 status = ICE_ERR_DOES_NOT_EXIST;
4407 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4408 /* a ref_cnt > 1 indicates that the vsi_list is being
4409 * shared by multiple rules. Decrement the ref_cnt and
4410 * remove this rule, but do not modify the list, as it
4411 * is in-use by other rules.
4413 list_elem->vsi_list_info->ref_cnt--;
4416 /* a ref_cnt of 1 indicates the vsi_list is only used
4417 * by one rule. However, the original removal request is only
4418 * for a single VSI. Update the vsi_list first, and only
4419 * remove the rule if there are no further VSIs in this list.
4421 vsi_handle = f_entry->fltr_info.vsi_handle;
4422 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4425 /* if VSI count goes to zero after updating the VSI list */
4426 if (list_elem->vsi_count == 0)
4431 /* Remove the lookup rule */
4432 struct ice_aqc_sw_rules_elem *s_rule;
4434 s_rule = (struct ice_aqc_sw_rules_elem *)
4435 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4437 status = ICE_ERR_NO_MEMORY;
4441 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4442 ice_aqc_opc_remove_sw_rules);
4444 status = ice_aq_sw_rules(hw, s_rule,
4445 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4446 ice_aqc_opc_remove_sw_rules, NULL);
4448 /* Remove a book keeping from the list */
4449 ice_free(hw, s_rule);
4454 LIST_DEL(&list_elem->list_entry);
4455 ice_free(hw, list_elem);
4458 ice_release_lock(rule_lock);
4463 * ice_aq_get_res_alloc - get allocated resources
4464 * @hw: pointer to the HW struct
4465 * @num_entries: pointer to u16 to store the number of resource entries returned
4466 * @buf: pointer to buffer
4467 * @buf_size: size of buf
4468 * @cd: pointer to command details structure or NULL
4470 * The caller-supplied buffer must be large enough to store the resource
4471 * information for all resource types. Each resource type is an
4472 * ice_aqc_get_res_resp_elem structure.
4475 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4476 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4477 struct ice_sq_cd *cd)
4479 struct ice_aqc_get_res_alloc *resp;
4480 enum ice_status status;
4481 struct ice_aq_desc desc;
4484 return ICE_ERR_BAD_PTR;
4486 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4487 return ICE_ERR_INVAL_SIZE;
4489 resp = &desc.params.get_res;
4491 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4492 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4494 if (!status && num_entries)
4495 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4501 * ice_aq_get_res_descs - get allocated resource descriptors
4502 * @hw: pointer to the hardware structure
4503 * @num_entries: number of resource entries in buffer
4504 * @buf: structure to hold response data buffer
4505 * @buf_size: size of buffer
4506 * @res_type: resource type
4507 * @res_shared: is resource shared
4508 * @desc_id: input - first desc ID to start; output - next desc ID
4509 * @cd: pointer to command details structure or NULL
4512 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4513 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4514 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4516 struct ice_aqc_get_allocd_res_desc *cmd;
4517 struct ice_aq_desc desc;
4518 enum ice_status status;
4520 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4522 cmd = &desc.params.get_res_desc;
4525 return ICE_ERR_PARAM;
4527 if (buf_size != (num_entries * sizeof(*buf)))
4528 return ICE_ERR_PARAM;
4530 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4532 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4533 ICE_AQC_RES_TYPE_M) | (res_shared ?
4534 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4535 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4537 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4539 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4545 * ice_add_mac_rule - Add a MAC address based filter rule
4546 * @hw: pointer to the hardware structure
4547 * @m_list: list of MAC addresses and forwarding information
4548 * @sw: pointer to switch info struct for which function add rule
4549 * @lport: logic port number on which function add rule
4551 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4552 * multiple unicast addresses, the function assumes that all the
4553 * addresses are unique in a given add_mac call. It doesn't
4554 * check for duplicates in this case, removing duplicates from a given
4555 * list should be taken care of in the caller of this function.
4557 static enum ice_status
4558 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4559 struct ice_switch_info *sw, u8 lport)
4561 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4562 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4563 struct ice_fltr_list_entry *m_list_itr;
4564 struct LIST_HEAD_TYPE *rule_head;
4565 u16 total_elem_left, s_rule_size;
4566 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4567 enum ice_status status = ICE_SUCCESS;
4568 u16 num_unicast = 0;
4572 rule_lock = &recp_list->filt_rule_lock;
4573 rule_head = &recp_list->filt_rules;
4575 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4577 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4581 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4582 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4583 if (!ice_is_vsi_valid(hw, vsi_handle))
4584 return ICE_ERR_PARAM;
4585 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4586 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4587 /* update the src in case it is VSI num */
4588 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4589 return ICE_ERR_PARAM;
4590 m_list_itr->fltr_info.src = hw_vsi_id;
4591 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4592 IS_ZERO_ETHER_ADDR(add))
4593 return ICE_ERR_PARAM;
4594 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4595 /* Don't overwrite the unicast address */
4596 ice_acquire_lock(rule_lock);
4597 if (ice_find_rule_entry(rule_head,
4598 &m_list_itr->fltr_info)) {
4599 ice_release_lock(rule_lock);
4600 return ICE_ERR_ALREADY_EXISTS;
4602 ice_release_lock(rule_lock);
4604 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4605 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4606 m_list_itr->status =
4607 ice_add_rule_internal(hw, recp_list, lport,
4609 if (m_list_itr->status)
4610 return m_list_itr->status;
4614 ice_acquire_lock(rule_lock);
4615 /* Exit if no suitable entries were found for adding bulk switch rule */
4617 status = ICE_SUCCESS;
4618 goto ice_add_mac_exit;
4621 /* Allocate switch rule buffer for the bulk update for unicast */
4622 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4623 s_rule = (struct ice_aqc_sw_rules_elem *)
4624 ice_calloc(hw, num_unicast, s_rule_size);
4626 status = ICE_ERR_NO_MEMORY;
4627 goto ice_add_mac_exit;
4631 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4633 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4634 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4636 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4637 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4638 ice_aqc_opc_add_sw_rules);
4639 r_iter = (struct ice_aqc_sw_rules_elem *)
4640 ((u8 *)r_iter + s_rule_size);
4644 /* Call AQ bulk switch rule update for all unicast addresses */
4646 /* Call AQ switch rule in AQ_MAX chunk */
4647 for (total_elem_left = num_unicast; total_elem_left > 0;
4648 total_elem_left -= elem_sent) {
4649 struct ice_aqc_sw_rules_elem *entry = r_iter;
4651 elem_sent = MIN_T(u8, total_elem_left,
4652 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4653 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4654 elem_sent, ice_aqc_opc_add_sw_rules,
4657 goto ice_add_mac_exit;
4658 r_iter = (struct ice_aqc_sw_rules_elem *)
4659 ((u8 *)r_iter + (elem_sent * s_rule_size));
4662 /* Fill up rule ID based on the value returned from FW */
4664 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4666 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4667 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4668 struct ice_fltr_mgmt_list_entry *fm_entry;
4670 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4671 f_info->fltr_rule_id =
4672 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4673 f_info->fltr_act = ICE_FWD_TO_VSI;
4674 /* Create an entry to track this MAC address */
4675 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4676 ice_malloc(hw, sizeof(*fm_entry));
4678 status = ICE_ERR_NO_MEMORY;
4679 goto ice_add_mac_exit;
4681 fm_entry->fltr_info = *f_info;
4682 fm_entry->vsi_count = 1;
4683 /* The book keeping entries will get removed when
4684 * base driver calls remove filter AQ command
4687 LIST_ADD(&fm_entry->list_entry, rule_head);
4688 r_iter = (struct ice_aqc_sw_rules_elem *)
4689 ((u8 *)r_iter + s_rule_size);
4694 ice_release_lock(rule_lock);
4696 ice_free(hw, s_rule);
4701 * ice_add_mac - Add a MAC address based filter rule
4702 * @hw: pointer to the hardware structure
4703 * @m_list: list of MAC addresses and forwarding information
4705 * Function add MAC rule for logical port from HW struct
4707 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4710 return ICE_ERR_PARAM;
4712 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4713 hw->port_info->lport);
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;
	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;
	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;
	/* source is always the HW VSI number for VSI-sourced VLAN rules */
	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	/* lock held across find/create so the rule list stays consistent */
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
		struct ice_vsi_list_map_info *map_info = NULL;
		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
				status = ice_create_vsi_list_rule(hw,
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
				status = ICE_ERR_DOES_NOT_EXIST;
			/* reuse VSI list for new rule and increment ref_cnt */
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		/* new 2-entry list replaces the shared single-VSI list */
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* before overriding VSI list map info. decrement ref_cnt of
		 */
		v_list_itr->vsi_list_info->ref_cnt--;
		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		v_list_itr->vsi_count++;
	ice_release_lock(rule_lock);
4871 * ice_add_vlan_rule - Add VLAN based filter rule
4872 * @hw: pointer to the hardware structure
4873 * @v_list: list of VLAN entries and forwarding information
4874 * @sw: pointer to switch info struct for which function add rule
4876 static enum ice_status
4877 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4878 struct ice_switch_info *sw)
4880 struct ice_fltr_list_entry *v_list_itr;
4881 struct ice_sw_recipe *recp_list;
4883 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4884 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4886 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4887 return ICE_ERR_PARAM;
4888 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4889 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4891 if (v_list_itr->status)
4892 return v_list_itr->status;
4898 * ice_add_vlan - Add a VLAN based filter rule
4899 * @hw: pointer to the hardware structure
4900 * @v_list: list of VLAN and forwarding information
4902 * Function add VLAN rule for logical port from HW struct
4904 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4907 return ICE_ERR_PARAM;
4909 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4913 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4914 * @hw: pointer to the hardware structure
4915 * @mv_list: list of MAC and VLAN filters
4916 * @sw: pointer to switch info struct for which function add rule
4917 * @lport: logic port number on which function add rule
4919 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4920 * pruning bits enabled, then it is the responsibility of the caller to make
4921 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4922 * VLAN won't be received on that VSI otherwise.
4924 static enum ice_status
4925 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4926 struct ice_switch_info *sw, u8 lport)
4928 struct ice_fltr_list_entry *mv_list_itr;
4929 struct ice_sw_recipe *recp_list;
4931 if (!mv_list || !hw)
4932 return ICE_ERR_PARAM;
4934 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4935 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4937 enum ice_sw_lkup_type l_type =
4938 mv_list_itr->fltr_info.lkup_type;
4940 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4941 return ICE_ERR_PARAM;
4942 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4943 mv_list_itr->status =
4944 ice_add_rule_internal(hw, recp_list, lport,
4946 if (mv_list_itr->status)
4947 return mv_list_itr->status;
4953 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4954 * @hw: pointer to the hardware structure
4955 * @mv_list: list of MAC VLAN addresses and forwarding information
4957 * Function add MAC VLAN rule for logical port from HW struct
4960 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4962 if (!mv_list || !hw)
4963 return ICE_ERR_PARAM;
4965 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4966 hw->port_info->lport);
4970 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4971 * @hw: pointer to the hardware structure
4972 * @em_list: list of ether type MAC filter, MAC is optional
4973 * @sw: pointer to switch info struct for which function add rule
4974 * @lport: logic port number on which function add rule
4976 * This function requires the caller to populate the entries in
4977 * the filter list with the necessary fields (including flags to
4978 * indicate Tx or Rx rules).
4980 static enum ice_status
4981 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4982 struct ice_switch_info *sw, u8 lport)
4984 struct ice_fltr_list_entry *em_list_itr;
4986 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4988 struct ice_sw_recipe *recp_list;
4989 enum ice_sw_lkup_type l_type;
4991 l_type = em_list_itr->fltr_info.lkup_type;
4992 recp_list = &sw->recp_list[l_type];
4994 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4995 l_type != ICE_SW_LKUP_ETHERTYPE)
4996 return ICE_ERR_PARAM;
4998 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5001 if (em_list_itr->status)
5002 return em_list_itr->status;
5008 * ice_add_eth_mac - Add a ethertype based filter rule
5009 * @hw: pointer to the hardware structure
5010 * @em_list: list of ethertype and forwarding information
5012 * Function add ethertype rule for logical port from HW struct
5015 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5017 if (!em_list || !hw)
5018 return ICE_ERR_PARAM;
5020 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5021 hw->port_info->lport);
5025 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5026 * @hw: pointer to the hardware structure
5027 * @em_list: list of ethertype or ethertype MAC entries
5028 * @sw: pointer to switch info struct for which function add rule
5030 static enum ice_status
5031 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5032 struct ice_switch_info *sw)
5034 struct ice_fltr_list_entry *em_list_itr, *tmp;
5036 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5038 struct ice_sw_recipe *recp_list;
5039 enum ice_sw_lkup_type l_type;
5041 l_type = em_list_itr->fltr_info.lkup_type;
5043 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5044 l_type != ICE_SW_LKUP_ETHERTYPE)
5045 return ICE_ERR_PARAM;
5047 recp_list = &sw->recp_list[l_type];
5048 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5050 if (em_list_itr->status)
5051 return em_list_itr->status;
5057 * ice_remove_eth_mac - remove a ethertype based filter rule
5058 * @hw: pointer to the hardware structure
5059 * @em_list: list of ethertype and forwarding information
5063 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5065 if (!em_list || !hw)
5066 return ICE_ERR_PARAM;
5068 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5072 * ice_rem_sw_rule_info
5073 * @hw: pointer to the hardware structure
5074 * @rule_head: pointer to the switch list structure that we want to delete
5077 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5079 if (!LIST_EMPTY(rule_head)) {
5080 struct ice_fltr_mgmt_list_entry *entry;
5081 struct ice_fltr_mgmt_list_entry *tmp;
5083 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5084 ice_fltr_mgmt_list_entry, list_entry) {
5085 LIST_DEL(&entry->list_entry);
5086 ice_free(hw, entry);
5092 * ice_rem_adv_rule_info
5093 * @hw: pointer to the hardware structure
5094 * @rule_head: pointer to the switch list structure that we want to delete
5097 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5099 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5100 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5102 if (LIST_EMPTY(rule_head))
5105 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5106 ice_adv_fltr_mgmt_list_entry, list_entry) {
5107 LIST_DEL(&lst_itr->list_entry);
5108 ice_free(hw, lst_itr->lkups);
5109 ice_free(hw, lst_itr);
5114 * ice_rem_all_sw_rules_info
5115 * @hw: pointer to the hardware structure
5117 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5119 struct ice_switch_info *sw = hw->switch_info;
5122 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5123 struct LIST_HEAD_TYPE *rule_head;
5125 rule_head = &sw->recp_list[i].filt_rules;
5126 if (!sw->recp_list[i].adv_rule)
5127 ice_rem_sw_rule_info(hw, rule_head);
5129 ice_rem_adv_rule_info(hw, rule_head);
5130 if (sw->recp_list[i].adv_rule &&
5131 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5132 sw->recp_list[i].adv_rule = false;
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	/* add needs the dummy-ethertype header; remove needs no header */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;
	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rules are sourced from the logical port */
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx default rules are sourced from the VSI itself */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;
		opcode = ice_aqc_opc_add_sw_rules;
		opcode = ice_aqc_opc_remove_sw_rules;
	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		/* record rule index returned by FW for later removal */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		/* clear path: invalidate the cached default-VSI bookkeeping */
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
	ice_free(hw, s_rule);
5226 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5227 * @list_head: head of rule list
5228 * @f_info: rule information
5230 * Helper function to search for a unicast rule entry - this is to be used
5231 * to remove unicast MAC filter that is not shared with other VSIs on the
5234 * Returns pointer to entry storing the rule if found
5236 static struct ice_fltr_mgmt_list_entry *
5237 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5238 struct ice_fltr_info *f_info)
5240 struct ice_fltr_mgmt_list_entry *list_itr;
5242 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5244 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5245 sizeof(f_info->l_data)) &&
5246 f_info->fwd_id.hw_vsi_id ==
5247 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5248 f_info->flag == list_itr->fltr_info.flag)
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
		return ICE_ERR_PARAM;
	rule_lock = &recp_list->filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 */
			/* only remove a unicast rule this VSI actually owns */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			ice_release_lock(rule_lock);
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (list_itr->status)
			return list_itr->status;
5316 * ice_remove_mac - remove a MAC address based filter rule
5317 * @hw: pointer to the hardware structure
5318 * @m_list: list of MAC addresses and forwarding information
5321 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5323 struct ice_sw_recipe *recp_list;
5325 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5326 return ice_remove_mac_rule(hw, m_list, recp_list);
5330 * ice_remove_vlan_rule - Remove VLAN based filter rule
5331 * @hw: pointer to the hardware structure
5332 * @v_list: list of VLAN entries and forwarding information
5333 * @recp_list: list from which function remove VLAN
5335 static enum ice_status
5336 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5337 struct ice_sw_recipe *recp_list)
5339 struct ice_fltr_list_entry *v_list_itr, *tmp;
5341 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5343 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5345 if (l_type != ICE_SW_LKUP_VLAN)
5346 return ICE_ERR_PARAM;
5347 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5349 if (v_list_itr->status)
5350 return v_list_itr->status;
5356 * ice_remove_vlan - remove a VLAN address based filter rule
5357 * @hw: pointer to the hardware structure
5358 * @v_list: list of VLAN and forwarding information
5362 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5364 struct ice_sw_recipe *recp_list;
5367 return ICE_ERR_PARAM;
5369 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5370 return ice_remove_vlan_rule(hw, v_list, recp_list);
5374 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5375 * @hw: pointer to the hardware structure
5376 * @v_list: list of MAC VLAN entries and forwarding information
5377 * @recp_list: list from which function remove MAC VLAN
5379 static enum ice_status
5380 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5381 struct ice_sw_recipe *recp_list)
5383 struct ice_fltr_list_entry *v_list_itr, *tmp;
5385 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5386 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5388 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5390 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5391 return ICE_ERR_PARAM;
5392 v_list_itr->status =
5393 ice_remove_rule_internal(hw, recp_list,
5395 if (v_list_itr->status)
5396 return v_list_itr->status;
5402 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5403 * @hw: pointer to the hardware structure
5404 * @mv_list: list of MAC VLAN and forwarding information
5407 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5409 struct ice_sw_recipe *recp_list;
5411 if (!mv_list || !hw)
5412 return ICE_ERR_PARAM;
5414 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5415 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5419 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5420 * @fm_entry: filter entry to inspect
5421 * @vsi_handle: VSI handle to compare with filter info
5424 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5426 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5427 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5428 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5429 fm_entry->vsi_list_info &&
5430 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5435 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5436 * @hw: pointer to the hardware structure
5437 * @vsi_handle: VSI handle to remove filters from
5438 * @vsi_list_head: pointer to the list to add entry to
5439 * @fi: pointer to fltr_info of filter entry to copy & add
5441 * Helper function, used when creating a list of filters to remove from
5442 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5443 * original filter entry, with the exception of fltr_info.fltr_act and
5444 * fltr_info.fwd_id fields. These are set such that later logic can
5445 * extract which VSI to remove the fltr from, and pass on that information.
5447 static enum ice_status
5448 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5449 struct LIST_HEAD_TYPE *vsi_list_head,
5450 struct ice_fltr_info *fi)
5452 struct ice_fltr_list_entry *tmp;
5454 /* this memory is freed up in the caller function
5455 * once filters for this VSI are removed
5457 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5459 return ICE_ERR_NO_MEMORY;
5461 tmp->fltr_info = *fi;
5463 /* Overwrite these fields to indicate which VSI to remove filter from,
5464 * so find and remove logic can extract the information from the
5465 * list entries. Note that original entries will still have proper
5468 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5469 tmp->fltr_info.vsi_handle = vsi_handle;
5470 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5472 LIST_ADD(&tmp->list_entry, vsi_list_head);
5478 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5479 * @hw: pointer to the hardware structure
5480 * @vsi_handle: VSI handle to remove filters from
5481 * @lkup_list_head: pointer to the list that has certain lookup type filters
5482 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5484 * Locates all filters in lkup_list_head that are used by the given VSI,
5485 * and adds COPIES of those entries to vsi_list_head (intended to be used
5486 * to remove the listed filters).
5487 * Note that this means all entries in vsi_list_head must be explicitly
5488 * deallocated by the caller when done with list.
5490 static enum ice_status
5491 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5492 struct LIST_HEAD_TYPE *lkup_list_head,
5493 struct LIST_HEAD_TYPE *vsi_list_head)
5495 struct ice_fltr_mgmt_list_entry *fm_entry;
5496 enum ice_status status = ICE_SUCCESS;
5498 /* check to make sure VSI ID is valid and within boundary */
5499 if (!ice_is_vsi_valid(hw, vsi_handle))
5500 return ICE_ERR_PARAM;
5502 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5503 ice_fltr_mgmt_list_entry, list_entry) {
5504 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5507 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5509 &fm_entry->fltr_info);
5517 * ice_determine_promisc_mask
5518 * @fi: filter info to parse
5520 * Helper function to determine which ICE_PROMISC_ mask corresponds
5521 * to given filter into.
5523 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5525 u16 vid = fi->l_data.mac_vlan.vlan_id;
5526 u8 *macaddr = fi->l_data.mac.mac_addr;
5527 bool is_tx_fltr = false;
5528 u8 promisc_mask = 0;
5530 if (fi->flag == ICE_FLTR_TX)
5533 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5534 promisc_mask |= is_tx_fltr ?
5535 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5536 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5537 promisc_mask |= is_tx_fltr ?
5538 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5539 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5540 promisc_mask |= is_tx_fltr ?
5541 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5543 promisc_mask |= is_tx_fltr ?
5544 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5546 return promisc_mask;
5550 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5551 * @hw: pointer to the hardware structure
5552 * @vsi_handle: VSI handle to retrieve info from
5553 * @promisc_mask: pointer to mask to be filled in
5554 * @vid: VLAN ID of promisc VLAN VSI
5555 * @sw: pointer to switch info struct for which function add rule
5557 static enum ice_status
5558 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5559 u16 *vid, struct ice_switch_info *sw)
5561 struct ice_fltr_mgmt_list_entry *itr;
5562 struct LIST_HEAD_TYPE *rule_head;
5563 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5565 if (!ice_is_vsi_valid(hw, vsi_handle))
5566 return ICE_ERR_PARAM;
5570 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5571 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5573 ice_acquire_lock(rule_lock);
5574 LIST_FOR_EACH_ENTRY(itr, rule_head,
5575 ice_fltr_mgmt_list_entry, list_entry) {
5576 /* Continue if this filter doesn't apply to this VSI or the
5577 * VSI ID is not in the VSI map for this filter
5579 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5582 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5584 ice_release_lock(rule_lock);
5590 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5591 * @hw: pointer to the hardware structure
5592 * @vsi_handle: VSI handle to retrieve info from
5593 * @promisc_mask: pointer to mask to be filled in
5594 * @vid: VLAN ID of promisc VLAN VSI
5597 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5600 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5601 vid, hw->switch_info);
5605 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5606 * @hw: pointer to the hardware structure
5607 * @vsi_handle: VSI handle to retrieve info from
5608 * @promisc_mask: pointer to mask to be filled in
5609 * @vid: VLAN ID of promisc VLAN VSI
5610 * @sw: pointer to switch info struct for which function add rule
5612 static enum ice_status
5613 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5614 u16 *vid, struct ice_switch_info *sw)
5616 struct ice_fltr_mgmt_list_entry *itr;
5617 struct LIST_HEAD_TYPE *rule_head;
5618 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5620 if (!ice_is_vsi_valid(hw, vsi_handle))
5621 return ICE_ERR_PARAM;
5625 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5626 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5628 ice_acquire_lock(rule_lock);
5629 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5631 /* Continue if this filter doesn't apply to this VSI or the
5632 * VSI ID is not in the VSI map for this filter
5634 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5637 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5639 ice_release_lock(rule_lock);
5645 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5646 * @hw: pointer to the hardware structure
5647 * @vsi_handle: VSI handle to retrieve info from
5648 * @promisc_mask: pointer to mask to be filled in
5649 * @vid: VLAN ID of promisc VLAN VSI
5652 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5655 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5656 vid, hw->switch_info);
5660 * ice_remove_promisc - Remove promisc based filter rules
5661 * @hw: pointer to the hardware structure
5662 * @recp_id: recipe ID for which the rule needs to removed
5663 * @v_list: list of promisc entries
5665 static enum ice_status
5666 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5667 struct LIST_HEAD_TYPE *v_list)
5669 struct ice_fltr_list_entry *v_list_itr, *tmp;
5670 struct ice_sw_recipe *recp_list;
5672 recp_list = &hw->switch_info->recp_list[recp_id];
5673 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5675 v_list_itr->status =
5676 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5677 if (v_list_itr->status)
5678 return v_list_itr->status;
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* VLAN promisc bits select the PROMISC_VLAN recipe, else PROMISC */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		recipe_id = ICE_SW_LKUP_PROMISC;
	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
	INIT_LIST_HEAD(&remove_list_head);
	/* collect matching filters under the lock, remove them after */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;
		if (!ice_vsi_uses_fltr(itr, vsi_handle))
		fltr_info = &itr->fltr_info;
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
			ice_release_lock(rule_lock);
			goto free_fltr_list;
	ice_release_lock(rule_lock);
	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
	/* free the temporary copies created above */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
5759 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5760 * @hw: pointer to the hardware structure
5761 * @vsi_handle: VSI handle to clear mode
5762 * @promisc_mask: mask of promiscuous config bits to clear
5763 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper: operates on the PF's own switch_info bookkeeping. */
5766 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5767 u8 promisc_mask, u16 vid)
5769 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5770 vid, hw->switch_info);
5774 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5775 * @hw: pointer to the hardware structure
5776 * @vsi_handle: VSI handle to configure
5777 * @promisc_mask: mask of promiscuous config bits
5778 * @vid: VLAN ID to set VLAN promiscuous
5779 * @lport: logical port number to configure promisc mode
5780 * @sw: pointer to switch info struct for which function add rule
5782 static enum ice_status
5783 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5784 u16 vid, u8 lport, struct ice_switch_info *sw)
5786 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5787 struct ice_fltr_list_entry f_list_entry;
5788 struct ice_fltr_info new_fltr;
5789 enum ice_status status = ICE_SUCCESS;
5795 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5797 if (!ice_is_vsi_valid(hw, vsi_handle))
5798 return ICE_ERR_PARAM;
5799 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5801 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN promisc requests use the PROMISC_VLAN recipe and carry the VLAN
 * ID in the filter; everything else uses the plain PROMISC recipe.
 */
5803 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5804 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5805 new_fltr.l_data.mac_vlan.vlan_id = vid;
5806 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5808 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5809 recipe_id = ICE_SW_LKUP_PROMISC;
5812 /* Separate filters must be set for each direction/packet type
5813 * combination, so we will loop over the mask value, store the
5814 * individual type, and clear it out in the input mask as it
5817 while (promisc_mask) {
5818 struct ice_sw_recipe *recp_list;
/* Consume exactly one direction/packet-type bit per iteration,
 * clearing it from promisc_mask so the loop terminates.
 */
5824 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5825 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5826 pkt_type = UCAST_FLTR;
5827 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5828 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5829 pkt_type = UCAST_FLTR;
5831 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5832 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5833 pkt_type = MCAST_FLTR;
5834 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5835 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5836 pkt_type = MCAST_FLTR;
5838 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5839 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5840 pkt_type = BCAST_FLTR;
5841 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5842 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5843 pkt_type = BCAST_FLTR;
5847 /* Check for VLAN promiscuous flag */
5848 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5849 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5850 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5851 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5855 /* Set filter DA based on packet type */
5856 mac_addr = new_fltr.l_data.mac.mac_addr;
5857 if (pkt_type == BCAST_FLTR) {
5858 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5859 } else if (pkt_type == MCAST_FLTR ||
5860 pkt_type == UCAST_FLTR) {
5861 /* Use the dummy ether header DA */
5862 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5863 ICE_NONDMA_TO_NONDMA);
5864 if (pkt_type == MCAST_FLTR)
5865 mac_addr[0] |= 0x1; /* Set multicast bit */
5868 /* Need to reset this to zero for all iterations */
/* TX filters match on the source VSI; RX filters match on the
 * logical port.
 */
5871 new_fltr.flag |= ICE_FLTR_TX;
5872 new_fltr.src = hw_vsi_id;
5874 new_fltr.flag |= ICE_FLTR_RX;
5875 new_fltr.src = lport;
5878 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5879 new_fltr.vsi_handle = vsi_handle;
5880 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5881 f_list_entry.fltr_info = new_fltr;
5882 recp_list = &sw->recp_list[recipe_id];
5884 status = ice_add_rule_internal(hw, recp_list, lport,
5886 if (status != ICE_SUCCESS)
5887 goto set_promisc_exit;
5895 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5896 * @hw: pointer to the hardware structure
5897 * @vsi_handle: VSI handle to configure
5898 * @promisc_mask: mask of promiscuous config bits
5899 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper: uses the PF's own logical port and switch_info. */
5902 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5905 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5906 hw->port_info->lport,
5911 * _ice_set_vlan_vsi_promisc
5912 * @hw: pointer to the hardware structure
5913 * @vsi_handle: VSI handle to configure
5914 * @promisc_mask: mask of promiscuous config bits
5915 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5916 * @lport: logical port number to configure promisc mode
5917 * @sw: pointer to switch info struct for which function add rule
5919 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5921 static enum ice_status
5922 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5923 bool rm_vlan_promisc, u8 lport,
5924 struct ice_switch_info *sw)
5926 struct ice_fltr_list_entry *list_itr, *tmp;
5927 struct LIST_HEAD_TYPE vsi_list_head;
5928 struct LIST_HEAD_TYPE *vlan_head;
5929 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5930 enum ice_status status;
/* Snapshot, under the VLAN rule lock, the VLAN filters used by this
 * VSI into a private list so promisc can be toggled per VLAN ID
 * without holding the lock.
 */
5933 INIT_LIST_HEAD(&vsi_list_head);
5934 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5935 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5936 ice_acquire_lock(vlan_lock);
5937 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5939 ice_release_lock(vlan_lock);
5941 goto free_fltr_list;
/* Set or clear the requested promisc mode for each associated VLAN. */
5943 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5945 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5946 if (rm_vlan_promisc)
5947 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5951 status = _ice_set_vsi_promisc(hw, vsi_handle,
5952 promisc_mask, vlan_id,
/* Free the snapshot list regardless of status. */
5959 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5960 ice_fltr_list_entry, list_entry) {
5961 LIST_DEL(&list_itr->list_entry);
5962 ice_free(hw, list_itr);
5968 * ice_set_vlan_vsi_promisc
5969 * @hw: pointer to the hardware structure
5970 * @vsi_handle: VSI handle to configure
5971 * @promisc_mask: mask of promiscuous config bits
5972 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5974 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper: uses the PF's own logical port and switch_info. */
5977 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5978 bool rm_vlan_promisc)
5980 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5981 rm_vlan_promisc, hw->port_info->lport,
5986 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5987 * @hw: pointer to the hardware structure
5988 * @vsi_handle: VSI handle to remove filters from
5989 * @recp_list: recipe list from which function remove fltr
5990 * @lkup: switch rule filter lookup type
5993 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5994 struct ice_sw_recipe *recp_list,
5995 enum ice_sw_lkup_type lkup)
5997 struct ice_fltr_list_entry *fm_entry;
5998 struct LIST_HEAD_TYPE remove_list_head;
5999 struct LIST_HEAD_TYPE *rule_head;
6000 struct ice_fltr_list_entry *tmp;
6001 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6002 enum ice_status status;
/* Collect this VSI's filters for the given lookup type under the rule
 * lock, then dispatch to the type-specific remove routine.
 */
6004 INIT_LIST_HEAD(&remove_list_head);
6005 rule_lock = &recp_list[lkup].filt_rule_lock;
6006 rule_head = &recp_list[lkup].filt_rules;
6007 ice_acquire_lock(rule_lock);
6008 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6010 ice_release_lock(rule_lock);
6015 case ICE_SW_LKUP_MAC:
6016 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6018 case ICE_SW_LKUP_VLAN:
6019 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6021 case ICE_SW_LKUP_PROMISC:
6022 case ICE_SW_LKUP_PROMISC_VLAN:
6023 ice_remove_promisc(hw, lkup, &remove_list_head);
6025 case ICE_SW_LKUP_MAC_VLAN:
6026 ice_remove_mac_vlan(hw, &remove_list_head);
6028 case ICE_SW_LKUP_ETHERTYPE:
6029 case ICE_SW_LKUP_ETHERTYPE_MAC:
6030 ice_remove_eth_mac(hw, &remove_list_head);
6032 case ICE_SW_LKUP_DFLT:
6033 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6035 case ICE_SW_LKUP_LAST:
6036 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary list entries built above. */
6040 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6041 ice_fltr_list_entry, list_entry) {
6042 LIST_DEL(&fm_entry->list_entry);
6043 ice_free(hw, fm_entry);
6048 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6049 * @hw: pointer to the hardware structure
6050 * @vsi_handle: VSI handle to remove filters from
6051 * @sw: pointer to switch info struct
6054 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6055 struct ice_switch_info *sw)
6057 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Remove the VSI's filters for every supported lookup type in turn. */
6059 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6060 sw->recp_list, ICE_SW_LKUP_MAC);
6061 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6062 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6063 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6064 sw->recp_list, ICE_SW_LKUP_PROMISC);
6065 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6066 sw->recp_list, ICE_SW_LKUP_VLAN);
6067 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6068 sw->recp_list, ICE_SW_LKUP_DFLT);
6069 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6070 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6071 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6072 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6073 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6074 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6078 * ice_remove_vsi_fltr - Remove all filters for a VSI
6079 * @hw: pointer to the hardware structure
6080 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper: operates on the PF's own switch_info. */
6082 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6084 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6088 * ice_alloc_res_cntr - allocating resource counter
6089 * @hw: pointer to the hardware structure
6090 * @type: type of resource
6091 * @alloc_shared: if set it is shared else dedicated
6092 * @num_items: number of entries requested for FD resource type
6093 * @counter_id: counter index returned by AQ call
6096 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6099 struct ice_aqc_alloc_free_res_elem *buf;
6100 enum ice_status status;
6103 /* Allocate resource */
6104 buf_len = ice_struct_size(buf, elem, 1);
6105 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6107 return ICE_ERR_NO_MEMORY;
/* Encode the resource type and shared/dedicated flag, then request the
 * allocation via the alloc_res admin queue command.
 */
6109 buf->num_elems = CPU_TO_LE16(num_items);
6110 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6111 ICE_AQC_RES_TYPE_M) | alloc_shared);
6113 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6114 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in sw_resp. */
6118 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6126 * ice_free_res_cntr - free resource counter
6127 * @hw: pointer to the hardware structure
6128 * @type: type of resource
6129 * @alloc_shared: if set it is shared else dedicated
6130 * @num_items: number of entries to be freed for FD resource type
6131 * @counter_id: counter ID resource which needs to be freed
6134 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6137 struct ice_aqc_alloc_free_res_elem *buf;
6138 enum ice_status status;
/* Mirror of ice_alloc_res_cntr(): build the same descriptor but issue
 * the free_res admin queue opcode with the counter ID to release.
 */
6142 buf_len = ice_struct_size(buf, elem, 1);
6143 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6145 return ICE_ERR_NO_MEMORY;
6147 buf->num_elems = CPU_TO_LE16(num_items);
6148 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6149 ICE_AQC_RES_TYPE_M) | alloc_shared);
6150 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6152 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6153 ice_aqc_opc_free_res, NULL);
6155 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6162 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6163 * @hw: pointer to the hardware structure
6164 * @counter_id: returns counter index
/* Convenience wrapper: one dedicated VLAN counter. */
6166 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6168 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6169 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6174 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6175 * @hw: pointer to the hardware structure
6176 * @counter_id: counter index to be freed
/* Convenience wrapper: releases one dedicated VLAN counter. */
6178 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6180 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6181 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6186 * ice_alloc_res_lg_act - add large action resource
6187 * @hw: pointer to the hardware structure
6188 * @l_id: large action ID to fill it in
6189 * @num_acts: number of actions to hold with a large action entry
6191 static enum ice_status
6192 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6194 struct ice_aqc_alloc_free_res_elem *sw_buf;
6195 enum ice_status status;
/* Bound-check the requested action count against ICE_MAX_LG_ACT. */
6198 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6199 return ICE_ERR_PARAM;
6201 /* Allocate resource for large action */
6202 buf_len = ice_struct_size(sw_buf, elem, 1);
6203 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6205 return ICE_ERR_NO_MEMORY;
6207 sw_buf->num_elems = CPU_TO_LE16(1);
6209 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6210 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
6211 * If num_acts is greater than 2, then use
6212 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6213 * The num_acts cannot exceed 4. This was ensured at the
6214 * beginning of the function.
6217 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6218 else if (num_acts == 2)
6219 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6221 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6223 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6224 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the large-action table index in sw_resp. */
6226 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6228 ice_free(hw, sw_buf);
6233 * ice_add_mac_with_sw_marker - add filter with sw marker
6234 * @hw: pointer to the hardware structure
6235 * @f_info: filter info structure containing the MAC filter information
6236 * @sw_marker: sw marker to tag the Rx descriptor with
6239 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6242 struct ice_fltr_mgmt_list_entry *m_entry;
6243 struct ice_fltr_list_entry fl_info;
6244 struct ice_sw_recipe *recp_list;
6245 struct LIST_HEAD_TYPE l_head;
6246 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6247 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker/VSI qualify. */
6251 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6252 return ICE_ERR_PARAM;
6254 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6255 return ICE_ERR_PARAM;
6257 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6258 return ICE_ERR_PARAM;
6260 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6261 return ICE_ERR_PARAM;
6262 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6264 /* Add filter if it doesn't exist so then the adding of large
6265 * action always results in update
6268 INIT_LIST_HEAD(&l_head);
6269 fl_info.fltr_info = *f_info;
6270 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the rule pre-existed so cleanup only removes
 * rules this call created.
 */
6272 entry_exists = false;
6273 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6274 hw->port_info->lport);
6275 if (ret == ICE_ERR_ALREADY_EXISTS)
6276 entry_exists = true;
6280 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6281 rule_lock = &recp_list->filt_rule_lock;
6282 ice_acquire_lock(rule_lock);
6283 /* Get the book keeping entry for the filter */
6284 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6288 /* If counter action was enabled for this rule then don't enable
6289 * sw marker large action
6291 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6292 ret = ICE_ERR_PARAM;
6296 /* if same marker was added before */
6297 if (m_entry->sw_marker_id == sw_marker) {
6298 ret = ICE_ERR_ALREADY_EXISTS;
6302 /* Allocate a hardware table entry to hold large act. Three actions
6303 * for marker based large action
6305 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6309 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6312 /* Update the switch rule to add the marker action */
6313 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6315 ice_release_lock(rule_lock);
6320 ice_release_lock(rule_lock);
6321 /* only remove entry if it did not exist previously */
6323 ret = ice_remove_mac(hw, &l_head);
6329 * ice_add_mac_with_counter - add filter with counter enabled
6330 * @hw: pointer to the hardware structure
6331 * @f_info: pointer to filter info structure containing the MAC filter
6335 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6337 struct ice_fltr_mgmt_list_entry *m_entry;
6338 struct ice_fltr_list_entry fl_info;
6339 struct ice_sw_recipe *recp_list;
6340 struct LIST_HEAD_TYPE l_head;
6341 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6342 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid VSI qualify. */
6347 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6348 return ICE_ERR_PARAM;
6350 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6351 return ICE_ERR_PARAM;
6353 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6354 return ICE_ERR_PARAM;
6355 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6356 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
/* Track whether the rule pre-existed so cleanup only removes rules
 * this call created.
 */
6358 entry_exist = false;
6360 rule_lock = &recp_list->filt_rule_lock;
6362 /* Add filter if it doesn't exist so then the adding of large
6363 * action always results in update
6365 INIT_LIST_HEAD(&l_head);
6367 fl_info.fltr_info = *f_info;
6368 LIST_ADD(&fl_info.list_entry, &l_head);
6370 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6371 hw->port_info->lport);
6372 if (ret == ICE_ERR_ALREADY_EXISTS)
6377 ice_acquire_lock(rule_lock);
6378 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6380 ret = ICE_ERR_BAD_PTR;
6384 /* Don't enable counter for a filter for which sw marker was enabled */
6385 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6386 ret = ICE_ERR_PARAM;
6390 /* If a counter was already enabled then don't need to add again */
6391 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6392 ret = ICE_ERR_ALREADY_EXISTS;
6396 /* Allocate a hardware table entry to VLAN counter */
6397 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6401 /* Allocate a hardware table entry to hold large act. Two actions for
6402 * counter based large action
6404 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6408 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6411 /* Update the switch rule to add the counter action */
6412 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6414 ice_release_lock(rule_lock);
6419 ice_release_lock(rule_lock);
6420 /* only remove entry if it did not exist previously */
6422 ret = ice_remove_mac(hw, &l_head);
6427 /* This is mapping table entry that maps every word within a given protocol
6428 * structure to the real byte offset as per the specification of that
6430 * for example dst address is 3 words in ethertype header and corresponding
6431 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6432 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6433 * matching entry describing its field. This needs to be updated if new
6434 * structure is added to that union.
/* Each entry lists the per-16-bit-word byte offsets within that protocol
 * header; indexed by enum ice_protocol_type.
 */
6436 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6437 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6438 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6439 { ICE_ETYPE_OL, { 0 } },
6440 { ICE_VLAN_OFOS, { 0, 2 } },
6441 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6442 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6443 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6444 26, 28, 30, 32, 34, 36, 38 } },
6445 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6446 26, 28, 30, 32, 34, 36, 38 } },
6447 { ICE_TCP_IL, { 0, 2 } },
6448 { ICE_UDP_OF, { 0, 2 } },
6449 { ICE_UDP_ILOS, { 0, 2 } },
6450 { ICE_SCTP_IL, { 0, 2 } },
6451 { ICE_VXLAN, { 8, 10, 12, 14 } },
6452 { ICE_GENEVE, { 8, 10, 12, 14 } },
6453 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6454 { ICE_NVGRE, { 0, 2, 4, 6 } },
6455 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
6456 { ICE_PPPOE, { 0, 2, 4, 6 } },
6457 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6458 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6459 { ICE_ESP, { 0, 2, 4, 6 } },
6460 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6461 { ICE_NAT_T, { 8, 10, 12, 14 } },
6462 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6463 { ICE_VLAN_EX, { 0, 2 } },
6466 /* The following table describes preferred grouping of recipes.
6467 * If a recipe that needs to be programmed is a superset or matches one of the
6468 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vector extraction; note several tunnel types share a HW ID
 * (e.g. VXLAN/GENEVE/GTP all map to ICE_UDP_OF_HW).
 */
6472 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6473 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6474 { ICE_MAC_IL, ICE_MAC_IL_HW },
6475 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6476 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6477 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6478 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6479 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6480 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6481 { ICE_TCP_IL, ICE_TCP_IL_HW },
6482 { ICE_UDP_OF, ICE_UDP_OF_HW },
6483 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6484 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6485 { ICE_VXLAN, ICE_UDP_OF_HW },
6486 { ICE_GENEVE, ICE_UDP_OF_HW },
6487 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6488 { ICE_NVGRE, ICE_GRE_OF_HW },
6489 { ICE_GTP, ICE_UDP_OF_HW },
6490 { ICE_PPPOE, ICE_PPPOE_HW },
6491 { ICE_PFCP, ICE_UDP_ILOS_HW },
6492 { ICE_L2TPV3, ICE_L2TPV3_HW },
6493 { ICE_ESP, ICE_ESP_HW },
6494 { ICE_AH, ICE_AH_HW },
6495 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6496 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6497 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6501 * ice_find_recp - find a recipe
6502 * @hw: pointer to the hardware structure
6503 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the matching recipe must also have
6505 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6507 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6508 enum ice_sw_tunnel_type tun_type)
6510 bool refresh_required = true;
6511 struct ice_sw_recipe *recp;
6514 /* Walk through existing recipes to find a match */
6515 recp = hw->switch_info->recp_list;
6516 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6517 /* If recipe was not created for this ID, in SW bookkeeping,
6518 * check if FW has an entry for this recipe. If the FW has an
6519 * entry update it in our SW bookkeeping and continue with the
6522 if (!recp[i].recp_created)
6523 if (ice_get_recp_frm_fw(hw,
6524 hw->switch_info->recp_list, i,
6528 /* Skip inverse action recipes */
6529 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6530 ICE_AQ_RECIPE_ACT_INV_ACT)
6533 /* if number of words we are looking for match */
6534 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6535 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6536 struct ice_fv_word *be = lkup_exts->fv_words;
6537 u16 *cr = recp[i].lkup_exts.field_mask;
6538 u16 *de = lkup_exts->field_mask;
6542 /* ar, cr, and qr are related to the recipe words, while
6543 * be, de, and pe are related to the lookup words
6545 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6546 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
/* Order-independent match: each lookup word must appear
 * somewhere in the recipe's word list.
 */
6548 if (ar[qr].off == be[pe].off &&
6549 ar[qr].prot_id == be[pe].prot_id &&
6551 /* Found the "pe"th word in the
6556 /* After walking through all the words in the
6557 * "i"th recipe if "p"th word was not found then
6558 * this recipe is not what we are looking for.
6559 * So break out from this loop and try the next
6562 if (qr >= recp[i].lkup_exts.n_val_words) {
6567 /* If for "i"th recipe the found was never set to false
6568 * then it means we found our match
6570 if (tun_type == recp[i].tun_type && found)
6571 return i; /* Return the recipe ID */
6574 return ICE_MAX_NUM_RECIPES;
6578 * ice_prot_type_to_id - get protocol ID from protocol type
6579 * @type: protocol type
6580 * @id: pointer to variable that will receive the ID
6582 * Returns true if found, false otherwise
/* Linear scan of the static ice_prot_id_tbl mapping; @id is written
 * only when a matching entry is found.
 */
6584 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6588 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6589 if (ice_prot_id_tbl[i].type == type) {
6590 *id = ice_prot_id_tbl[i].protocol_id;
6597 * ice_find_valid_words - count valid words
6598 * @rule: advanced rule with lookup information
6599 * @lkup_exts: byte offset extractions of the words that are valid
6601 * calculate valid words in a lookup rule using mask value
6604 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6605 struct ice_prot_lkup_ext *lkup_exts)
6607 u8 j, word, prot_id, ret_val;
6609 if (!ice_prot_type_to_id(rule->type, &prot_id))
6612 word = lkup_exts->n_val_words;
/* Scan the rule's mask as an array of 16-bit words; each non-zero
 * word contributes one protocol/offset/mask triple to lkup_exts,
 * up to ICE_MAX_CHAIN_WORDS total.
 */
6614 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6615 if (((u16 *)&rule->m_u)[j] &&
6616 rule->type < ARRAY_SIZE(ice_prot_ext)) {
6617 /* No more space to accommodate */
6618 if (word >= ICE_MAX_CHAIN_WORDS)
6620 lkup_exts->fv_words[word].off =
6621 ice_prot_ext[rule->type].offs[j];
6622 lkup_exts->fv_words[word].prot_id =
6623 ice_prot_id_tbl[rule->type].protocol_id;
6624 lkup_exts->field_mask[word] =
6625 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
/* Return value is the number of words added by this rule. */
6629 ret_val = word - lkup_exts->n_val_words;
6630 lkup_exts->n_val_words = word;
6636 * ice_create_first_fit_recp_def - Create a recipe grouping
6637 * @hw: pointer to the hardware structure
6638 * @lkup_exts: an array of protocol header extractions
6639 * @rg_list: pointer to a list that stores new recipe groups
6640 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6642 * Using first fit algorithm, take all the words that are still not done
6643 * and start grouping them in 4-word groups. Each group makes up one
6646 static enum ice_status
6647 ice_create_first_fit_recp_def(struct ice_hw *hw,
6648 struct ice_prot_lkup_ext *lkup_exts,
6649 struct LIST_HEAD_TYPE *rg_list,
6652 struct ice_pref_recipe_group *grp = NULL;
/* Special case: a rule with no valid words still needs one (empty)
 * recipe group entry on rg_list.
 */
6657 if (!lkup_exts->n_val_words) {
6658 struct ice_recp_grp_entry *entry;
6660 entry = (struct ice_recp_grp_entry *)
6661 ice_malloc(hw, sizeof(*entry));
6663 return ICE_ERR_NO_MEMORY;
6664 LIST_ADD(&entry->l_entry, rg_list);
6665 grp = &entry->r_group;
6667 grp->n_val_pairs = 0;
6670 /* Walk through every word in the rule to check if it is not done. If so
6671 * then this word needs to be part of a new recipe.
6673 for (j = 0; j < lkup_exts->n_val_words; j++)
6674 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one
 * already holds ICE_NUM_WORDS_RECIPE pairs (first-fit).
 */
6676 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6677 struct ice_recp_grp_entry *entry;
6679 entry = (struct ice_recp_grp_entry *)
6680 ice_malloc(hw, sizeof(*entry));
6682 return ICE_ERR_NO_MEMORY;
6683 LIST_ADD(&entry->l_entry, rg_list);
6684 grp = &entry->r_group;
6688 grp->pairs[grp->n_val_pairs].prot_id =
6689 lkup_exts->fv_words[j].prot_id;
6690 grp->pairs[grp->n_val_pairs].off =
6691 lkup_exts->fv_words[j].off;
6692 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6700 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6701 * @hw: pointer to the hardware structure
6702 * @fv_list: field vector with the extraction sequence information
6703 * @rg_list: recipe groupings with protocol-offset pairs
6705 * Helper function to fill in the field vector indices for protocol-offset
6706 * pairs. These indexes are then ultimately programmed into a recipe.
6708 static enum ice_status
6709 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6710 struct LIST_HEAD_TYPE *rg_list)
6712 struct ice_sw_fv_list_entry *fv;
6713 struct ice_recp_grp_entry *rg;
6714 struct ice_fv_word *fv_ext;
6716 if (LIST_EMPTY(fv_list))
/* Only the first field vector on the list is consulted here. */
6719 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6720 fv_ext = fv->fv_ptr->ew;
6722 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6725 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6726 struct ice_fv_word *pr;
6731 pr = &rg->r_group.pairs[i];
6732 mask = rg->r_group.mask[i];
/* Find the field vector word with the same protocol ID and
 * offset as this recipe pair.
 */
6734 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6735 if (fv_ext[j].prot_id == pr->prot_id &&
6736 fv_ext[j].off == pr->off) {
6739 /* Store index of field vector */
6741 rg->fv_mask[i] = mask;
6745 /* Protocol/offset could not be found, caller gave an
6749 return ICE_ERR_PARAM;
6757 * ice_find_free_recp_res_idx - find free result indexes for recipe
6758 * @hw: pointer to hardware structure
6759 * @profiles: bitmap of profiles that will be associated with the new recipe
6760 * @free_idx: pointer to variable to receive the free index bitmap
6762 * The algorithm used here is:
6763 * 1. When creating a new recipe, create a set P which contains all
6764 * Profiles that will be associated with our new recipe
6766 * 2. For each Profile p in set P:
6767 * a. Add all recipes associated with Profile p into set R
6768 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6769 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6770 * i. Or just assume they all have the same possible indexes:
6772 * i.e., PossibleIndexes = 0x0000F00000000000
6774 * 3. For each Recipe r in set R:
6775 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6776 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6778 * FreeIndexes will contain the bits indicating the indexes free for use,
6779 * then the code needs to update the recipe[r].used_result_idx_bits to
6780 * indicate which indexes were selected for use by this recipe.
6783 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6784 ice_bitmap_t *free_idx)
6786 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6787 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6788 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6791 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6792 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6793 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6794 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible" and narrow per profile below. */
6796 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6798 /* For each profile we are going to associate the recipe with, add the
6799 * recipes that are associated with that profile. This will give us
6800 * the set of recipes that our recipe may collide with. Also, determine
6801 * what possible result indexes are usable given this set of profiles.
6803 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6804 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6805 ICE_MAX_NUM_RECIPES);
6806 ice_and_bitmap(possible_idx, possible_idx,
6807 hw->switch_info->prof_res_bm[bit],
6811 /* For each recipe that our new recipe may collide with, determine
6812 * which indexes have been used.
6814 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6815 ice_or_bitmap(used_idx, used_idx,
6816 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is always a subset of possible). */
6819 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6821 /* return number of free indexes */
6822 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6826 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6827 * @hw: pointer to hardware structure
6828 * @rm: recipe management list entry
6829 * @profiles: bitmap of profiles that will be associated.
6831 static enum ice_status
6832 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6833 ice_bitmap_t *profiles)
6835 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6836 struct ice_aqc_recipe_data_elem *tmp;
6837 struct ice_aqc_recipe_data_elem *buf;
6838 struct ice_recp_grp_entry *entry;
6839 enum ice_status status;
6845 /* When more than one recipe are required, another recipe is needed to
6846 * chain them together. Matching a tunnel metadata ID takes up one of
6847 * the match fields in the chaining recipe reducing the number of
6848 * chained recipes by one.
6850 /* check number of free result indices */
6851 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6852 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6854 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6855 free_res_idx, rm->n_grp_count);
6857 if (rm->n_grp_count > 1) {
6858 if (rm->n_grp_count > free_res_idx)
6859 return ICE_ERR_MAX_LIMIT;
6864 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6865 return ICE_ERR_MAX_LIMIT;
6867 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6868 ICE_MAX_NUM_RECIPES,
6871 return ICE_ERR_NO_MEMORY;
6873 buf = (struct ice_aqc_recipe_data_elem *)
6874 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6876 status = ICE_ERR_NO_MEMORY;
6880 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6881 recipe_count = ICE_MAX_NUM_RECIPES;
6882 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6884 if (status || recipe_count == 0)
6887 /* Allocate the recipe resources, and configure them according to the
6888 * match fields from protocol headers and extracted field vectors.
6890 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6891 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6894 status = ice_alloc_recipe(hw, &entry->rid);
6898 /* Clear the result index of the located recipe, as this will be
6899 * updated, if needed, later in the recipe creation process.
6901 tmp[0].content.result_indx = 0;
6903 buf[recps] = tmp[0];
6904 buf[recps].recipe_indx = (u8)entry->rid;
6905 /* if the recipe is a non-root recipe RID should be programmed
6906 * as 0 for the rules to be applied correctly.
6908 buf[recps].content.rid = 0;
6909 ice_memset(&buf[recps].content.lkup_indx, 0,
6910 sizeof(buf[recps].content.lkup_indx),
6913 /* All recipes use look-up index 0 to match switch ID. */
6914 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6915 buf[recps].content.mask[0] =
6916 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6917 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6920 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6921 buf[recps].content.lkup_indx[i] = 0x80;
6922 buf[recps].content.mask[i] = 0;
6925 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6926 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6927 buf[recps].content.mask[i + 1] =
6928 CPU_TO_LE16(entry->fv_mask[i]);
6931 if (rm->n_grp_count > 1) {
6932 /* Checks to see if there really is a valid result index
6935 if (chain_idx >= ICE_MAX_FV_WORDS) {
6936 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6937 status = ICE_ERR_MAX_LIMIT;
6941 entry->chain_idx = chain_idx;
6942 buf[recps].content.result_indx =
6943 ICE_AQ_RECIPE_RESULT_EN |
6944 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6945 ICE_AQ_RECIPE_RESULT_DATA_M);
6946 ice_clear_bit(chain_idx, result_idx_bm);
6947 chain_idx = ice_find_first_bit(result_idx_bm,
6951 /* fill recipe dependencies */
6952 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6953 ICE_MAX_NUM_RECIPES);
6954 ice_set_bit(buf[recps].recipe_indx,
6955 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6956 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6960 if (rm->n_grp_count == 1) {
6961 rm->root_rid = buf[0].recipe_indx;
6962 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6963 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6964 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6965 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6966 sizeof(buf[0].recipe_bitmap),
6967 ICE_NONDMA_TO_NONDMA);
6969 status = ICE_ERR_BAD_PTR;
6972 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6973 * the recipe which is getting created if specified
6974 * by user. Usually any advanced switch filter, which results
6975 * into new extraction sequence, ended up creating a new recipe
6976 * of type ROOT and usually recipes are associated with profiles
6977 * Switch rule referreing newly created recipe, needs to have
6978 * either/or 'fwd' or 'join' priority, otherwise switch rule
6979 * evaluation will not happen correctly. In other words, if
6980 * switch rule to be evaluated on priority basis, then recipe
6981 * needs to have priority, otherwise it will be evaluated last.
6983 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6985 struct ice_recp_grp_entry *last_chain_entry;
6988 /* Allocate the last recipe that will chain the outcomes of the
6989 * other recipes together
6991 status = ice_alloc_recipe(hw, &rid);
6995 buf[recps].recipe_indx = (u8)rid;
6996 buf[recps].content.rid = (u8)rid;
6997 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6998 /* the new entry created should also be part of rg_list to
6999 * make sure we have complete recipe
7001 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7002 sizeof(*last_chain_entry));
7003 if (!last_chain_entry) {
7004 status = ICE_ERR_NO_MEMORY;
7007 last_chain_entry->rid = rid;
7008 ice_memset(&buf[recps].content.lkup_indx, 0,
7009 sizeof(buf[recps].content.lkup_indx),
7011 /* All recipes use look-up index 0 to match switch ID. */
7012 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7013 buf[recps].content.mask[0] =
7014 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7015 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7016 buf[recps].content.lkup_indx[i] =
7017 ICE_AQ_RECIPE_LKUP_IGNORE;
7018 buf[recps].content.mask[i] = 0;
7022 /* update r_bitmap with the recp that is used for chaining */
7023 ice_set_bit(rid, rm->r_bitmap);
7024 /* this is the recipe that chains all the other recipes so it
7025 * should not have a chaining ID to indicate the same
7027 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7028 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7030 last_chain_entry->fv_idx[i] = entry->chain_idx;
7031 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7032 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7033 ice_set_bit(entry->rid, rm->r_bitmap);
7035 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7036 if (sizeof(buf[recps].recipe_bitmap) >=
7037 sizeof(rm->r_bitmap)) {
7038 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7039 sizeof(buf[recps].recipe_bitmap),
7040 ICE_NONDMA_TO_NONDMA);
7042 status = ICE_ERR_BAD_PTR;
7045 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7048 rm->root_rid = (u8)rid;
7050 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7054 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7055 ice_release_change_lock(hw);
7059 /* Every recipe that just got created add it to the recipe
7062 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7063 struct ice_switch_info *sw = hw->switch_info;
7064 bool is_root, idx_found = false;
7065 struct ice_sw_recipe *recp;
7066 u16 idx, buf_idx = 0;
7068 /* find buffer index for copying some data */
7069 for (idx = 0; idx < rm->n_grp_count; idx++)
7070 if (buf[idx].recipe_indx == entry->rid) {
7076 status = ICE_ERR_OUT_OF_RANGE;
7080 recp = &sw->recp_list[entry->rid];
7081 is_root = (rm->root_rid == entry->rid);
7082 recp->is_root = is_root;
7084 recp->root_rid = entry->rid;
7085 recp->big_recp = (is_root && rm->n_grp_count > 1);
7087 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7088 entry->r_group.n_val_pairs *
7089 sizeof(struct ice_fv_word),
7090 ICE_NONDMA_TO_NONDMA);
7092 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7093 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7095 /* Copy non-result fv index values and masks to recipe. This
7096 * call will also update the result recipe bitmask.
7098 ice_collect_result_idx(&buf[buf_idx], recp);
7100 /* for non-root recipes, also copy to the root, this allows
7101 * easier matching of a complete chained recipe
7104 ice_collect_result_idx(&buf[buf_idx],
7105 &sw->recp_list[rm->root_rid]);
7107 recp->n_ext_words = entry->r_group.n_val_pairs;
7108 recp->chain_idx = entry->chain_idx;
7109 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7110 recp->n_grp_count = rm->n_grp_count;
7111 recp->tun_type = rm->tun_type;
7112 recp->recp_created = true;
7126 * ice_create_recipe_group - creates recipe group
7127 * @hw: pointer to hardware structure
7128 * @rm: recipe management list entry
7129 * @lkup_exts: lookup elements
/* Packs the extracted lookup words into recipe groups (rm->rg_list) via
 * ice_create_first_fit_recp_def(), then copies the word/mask arrays from
 * @lkup_exts into @rm for later use. (Elided listing: the status check and
 * return are not visible in this excerpt.)
 */
7131 static enum ice_status
7132 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7133 struct ice_prot_lkup_ext *lkup_exts)
7135 enum ice_status status;
7138 rm->n_grp_count = 0;
7140 /* Create recipes for words that are marked not done by packing them
7143 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7144 &rm->rg_list, &recp_count);
7146 rm->n_grp_count += recp_count;
7147 rm->n_ext_words = lkup_exts->n_val_words;
7148 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7149 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7150 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7151 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7158 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7159 * @hw: pointer to hardware structure
7160 * @lkups: lookup elements or match criteria for the advanced recipe, one
7161 * structure per protocol header
7162 * @lkups_cnt: number of protocols
7163 * @bm: bitmap of field vectors to consider
7164 * @fv_list: pointer to a list that holds the returned field vectors
/* Translates each lookup's protocol type to a HW protocol ID, then asks
 * ice_get_sw_fv_list() for field vectors covering all of them. The
 * prot_ids scratch array is freed on all paths (goto cleanup, elided here).
 */
7166 static enum ice_status
7167 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7168 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7170 enum ice_status status;
7177 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7179 return ICE_ERR_NO_MEMORY;
7181 for (i = 0; i < lkups_cnt; i++)
/* Unknown protocol type -> configuration error. */
7182 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7183 status = ICE_ERR_CFG;
7187 /* Find field vectors that include all specified protocol types */
7188 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7191 ice_free(hw, prot_ids);
7196 * ice_tun_type_match_mask - determine if tun type needs a match mask
7197 * @tun_type: tunnel type
7198 * @mask: mask to be used for the tunnel
/* Returns whether @tun_type requires matching the tunnel metadata flag
 * word, and writes the mask to use via @mask. The VLAN-tunnel variants
 * exclude the VLAN bit from the flag mask. (Elided listing: the return
 * statements and default case are not visible here.)
 */
7200 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7203 case ICE_SW_TUN_VXLAN_GPE:
7204 case ICE_SW_TUN_GENEVE:
7205 case ICE_SW_TUN_VXLAN:
7206 case ICE_SW_TUN_NVGRE:
7207 case ICE_SW_TUN_UDP:
7208 case ICE_ALL_TUNNELS:
7209 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7210 case ICE_NON_TUN_QINQ:
7211 case ICE_SW_TUN_PPPOE_QINQ:
7212 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7213 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7214 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7215 *mask = ICE_TUN_FLAG_MASK;
7218 case ICE_SW_TUN_GENEVE_VLAN:
7219 case ICE_SW_TUN_VXLAN_VLAN:
/* VLAN tunnel variants: match the tunnel flag but ignore the VLAN bit. */
7220 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7230 * ice_add_special_words - Add words that are not protocols, such as metadata
7231 * @rinfo: other information regarding the rule e.g. priority and action info
7232 * @lkup_exts: lookup word structure
/* If the rule's tunnel type requires it, appends one metadata lookup word
 * (protocol ICE_META_DATA_ID_HW at the tunnel-flag MDID offset) to
 * @lkup_exts so the recipe also matches the packet's tunnel flag bits.
 * Fails with ICE_ERR_MAX_LIMIT when no lookup-word slot remains.
 */
7234 static enum ice_status
7235 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7236 struct ice_prot_lkup_ext *lkup_exts)
7240 /* If this is a tunneled packet, then add recipe index to match the
7241 * tunnel bit in the packet metadata flags.
7243 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7244 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7245 u8 word = lkup_exts->n_val_words++;
7247 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7248 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7249 lkup_exts->field_mask[word] = mask;
7251 return ICE_ERR_MAX_LIMIT;
7258 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7259 * @hw: pointer to hardware structure
7260 * @rinfo: other information regarding the rule e.g. priority and action info
7261 * @bm: pointer to memory for returning the bitmap of field vectors
/* Builds @bm, the set of profiles (field vectors) compatible with the
 * rule's tunnel type. Some tunnel types map to a profile-type class
 * resolved via ice_get_sw_fv_bitmap(); others set explicit profile IDs
 * directly. (Elided listing: break/return statements are not visible.)
 */
7264 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7267 enum ice_prof_type prof_type;
7269 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7271 switch (rinfo->tun_type) {
7273 case ICE_NON_TUN_QINQ:
7274 prof_type = ICE_PROF_NON_TUN;
7276 case ICE_ALL_TUNNELS:
7277 prof_type = ICE_PROF_TUN_ALL;
7279 case ICE_SW_TUN_VXLAN_GPE:
7280 case ICE_SW_TUN_GENEVE:
7281 case ICE_SW_TUN_GENEVE_VLAN:
7282 case ICE_SW_TUN_VXLAN:
7283 case ICE_SW_TUN_VXLAN_VLAN:
7284 case ICE_SW_TUN_UDP:
7285 case ICE_SW_TUN_GTP:
7286 prof_type = ICE_PROF_TUN_UDP;
7288 case ICE_SW_TUN_NVGRE:
7289 prof_type = ICE_PROF_TUN_GRE;
7291 case ICE_SW_TUN_PPPOE:
7292 case ICE_SW_TUN_PPPOE_QINQ:
7293 prof_type = ICE_PROF_TUN_PPPOE;
/* The remaining cases select explicit profile IDs instead of a class. */
7295 case ICE_SW_TUN_PPPOE_PAY:
7296 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7297 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7299 case ICE_SW_TUN_PPPOE_IPV4:
7300 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7301 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7302 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7303 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7305 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7306 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7308 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7309 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7311 case ICE_SW_TUN_PPPOE_IPV6:
7312 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7313 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7314 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7315 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7317 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7318 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7320 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7321 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7323 case ICE_SW_TUN_PROFID_IPV6_ESP:
7324 case ICE_SW_TUN_IPV6_ESP:
7325 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7327 case ICE_SW_TUN_PROFID_IPV6_AH:
7328 case ICE_SW_TUN_IPV6_AH:
7329 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7331 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7332 case ICE_SW_TUN_IPV6_L2TPV3:
7333 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7335 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7336 case ICE_SW_TUN_IPV6_NAT_T:
7337 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7339 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7340 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7342 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7343 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7345 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7346 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7348 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7349 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7351 case ICE_SW_TUN_IPV4_NAT_T:
7352 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7354 case ICE_SW_TUN_IPV4_L2TPV3:
7355 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7357 case ICE_SW_TUN_IPV4_ESP:
7358 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7360 case ICE_SW_TUN_IPV4_AH:
7361 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7363 case ICE_SW_IPV4_TCP:
7364 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7366 case ICE_SW_IPV4_UDP:
7367 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7369 case ICE_SW_IPV6_TCP:
7370 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7372 case ICE_SW_IPV6_UDP:
7373 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
/* GTP-U cases cover both the EH (extension header) and non-EH profiles
 * for OTHER/UDP/TCP inner payloads.
 */
7375 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7376 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7377 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7378 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7379 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7380 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7381 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7383 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7384 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7385 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7386 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7387 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7388 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7389 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7391 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7392 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7393 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7394 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7395 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7396 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7397 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7399 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7400 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7401 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7402 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7403 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7404 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7405 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7407 case ICE_SW_TUN_AND_NON_TUN:
7408 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7410 prof_type = ICE_PROF_ALL;
/* Resolve the selected profile-type class into the output bitmap. */
7414 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7418 * ice_is_prof_rule - determine if rule type is a profile rule
7419 * @type: the rule type
7421 * if the rule type is a profile rule, that means that there no field value
7422 * match required, in this case just a profile hit is required.
/* Returns true for the ICE_SW_TUN_PROFID_* tunnel types listed below;
 * the default (false) path is elided from this excerpt.
 */
7424 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7427 case ICE_SW_TUN_PROFID_IPV6_ESP:
7428 case ICE_SW_TUN_PROFID_IPV6_AH:
7429 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7430 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7431 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7432 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7433 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7434 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7444 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7445 * @hw: pointer to hardware structure
7446 * @lkups: lookup elements or match criteria for the advanced recipe, one
7447 * structure per protocol header
7448 * @lkups_cnt: number of protocols
7449 * @rinfo: other information regarding the rule e.g. priority and action info
7450 * @rid: return the recipe ID of the recipe created
/* NOTE(review): elided listing — error-handling branches, labels and
 * closing braces between the numbered lines are not visible here.
 *
 * Top-level flow: validate lookups, collect candidate field vectors,
 * add metadata match words, group words into recipes, reuse an existing
 * recipe when one matches (ice_find_recp), otherwise create a new one
 * (ice_add_sw_recipe) and associate it with every compatible profile.
 */
7452 static enum ice_status
7453 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7454 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7456 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7457 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7458 struct ice_prot_lkup_ext *lkup_exts;
7459 struct ice_recp_grp_entry *r_entry;
7460 struct ice_sw_fv_list_entry *fvit;
7461 struct ice_recp_grp_entry *r_tmp;
7462 struct ice_sw_fv_list_entry *tmp;
7463 enum ice_status status = ICE_SUCCESS;
7464 struct ice_sw_recipe *rm;
/* Non-profile rules must supply at least one lookup element. */
7467 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7468 return ICE_ERR_PARAM;
7470 lkup_exts = (struct ice_prot_lkup_ext *)
7471 ice_malloc(hw, sizeof(*lkup_exts));
7473 return ICE_ERR_NO_MEMORY;
7475 /* Determine the number of words to be matched and if it exceeds a
7476 * recipe's restrictions
7478 for (i = 0; i < lkups_cnt; i++) {
7481 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7482 status = ICE_ERR_CFG;
7483 goto err_free_lkup_exts;
7486 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7488 status = ICE_ERR_CFG;
7489 goto err_free_lkup_exts;
7493 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7495 status = ICE_ERR_NO_MEMORY;
7496 goto err_free_lkup_exts;
7499 /* Get field vectors that contain fields extracted from all the protocol
7500 * headers being programmed.
7502 INIT_LIST_HEAD(&rm->fv_list);
7503 INIT_LIST_HEAD(&rm->rg_list);
7505 /* Get bitmap of field vectors (profiles) that are compatible with the
7506 * rule request; only these will be searched in the subsequent call to
7509 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7511 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7515 /* Create any special protocol/offset pairs, such as looking at tunnel
7516 * bits by extracting metadata
7518 status = ice_add_special_words(rinfo, lkup_exts);
7520 goto err_free_lkup_exts;
7522 /* Group match words into recipes using preferred recipe grouping
7525 status = ice_create_recipe_group(hw, rm, lkup_exts);
7529 /* set the recipe priority if specified */
7530 rm->priority = (u8)rinfo->priority;
7532 /* Find offsets from the field vector. Pick the first one for all the
7535 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7539 /* An empty FV list means to use all the profiles returned in the
7542 if (LIST_EMPTY(&rm->fv_list)) {
7545 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7546 struct ice_sw_fv_list_entry *fvl;
7548 fvl = (struct ice_sw_fv_list_entry *)
7549 ice_malloc(hw, sizeof(*fvl));
7553 fvl->profile_id = j;
7554 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7558 /* get bitmap of all profiles the recipe will be associated with */
7559 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7560 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7562 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7563 ice_set_bit((u16)fvit->profile_id, profiles);
7566 /* Look for a recipe which matches our requested fv / mask list */
7567 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7568 if (*rid < ICE_MAX_NUM_RECIPES)
7569 /* Success if found a recipe that match the existing criteria */
7572 rm->tun_type = rinfo->tun_type;
7573 /* Recipe we need does not exist, add a recipe */
7574 status = ice_add_sw_recipe(hw, rm, profiles);
7578 /* Associate all the recipes created with all the profiles in the
7579 * common field vector.
7581 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7583 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the new recipe bits into the profile's existing recipe map
 * before writing it back, so previously mapped recipes are preserved.
 */
7586 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7587 (u8 *)r_bitmap, NULL);
7591 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7592 ICE_MAX_NUM_RECIPES);
7593 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7597 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7600 ice_release_change_lock(hw);
7605 /* Update profile to recipe bitmap array */
7606 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7607 ICE_MAX_NUM_RECIPES);
7609 /* Update recipe to profile bitmap array */
7610 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7611 ice_set_bit((u16)fvit->profile_id,
7612 recipe_to_profile[j]);
7615 *rid = rm->root_rid;
7616 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7617 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the recipe group list and FV list built above. */
7619 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7620 ice_recp_grp_entry, l_entry) {
7621 LIST_DEL(&r_entry->l_entry);
7622 ice_free(hw, r_entry);
7625 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7627 LIST_DEL(&fvit->list_entry);
7632 ice_free(hw, rm->root_buf);
7637 ice_free(hw, lkup_exts);
7643 * ice_find_dummy_packet - find dummy packet by tunnel type
7645 * @lkups: lookup elements or match criteria for the advanced recipe, one
7646 * structure per protocol header
7647 * @lkups_cnt: number of protocols
7648 * @tun_type: tunnel type from the match criteria
7649 * @pkt: dummy packet to fill according to filter match criteria
7650 * @pkt_len: packet length of dummy packet
7651 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Selects the dummy packet template (and its protocol-offset table) that
 * best matches the rule: first scans @lkups for TCP/UDP/IPv6/VLAN/NVGRE/
 * PPPoE-IPv6 hints, then dispatches on @tun_type, falling through to
 * plain TCP/UDP (optionally VLAN-tagged, IPv4 or IPv6) templates.
 * (Elided listing: the `return;` after each selection is not visible.)
 */
7654 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7655 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7657 const struct ice_dummy_pkt_offsets **offsets)
7659 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Derive protocol hints from the match criteria before dispatching. */
7663 for (i = 0; i < lkups_cnt; i++) {
7664 if (lkups[i].type == ICE_UDP_ILOS)
7666 else if (lkups[i].type == ICE_TCP_IL)
7668 else if (lkups[i].type == ICE_IPV6_OFOS)
7670 else if (lkups[i].type == ICE_VLAN_OFOS)
/* Outer IPv4 with protocol 0x2F (GRE) fully masked -> NVGRE hint. */
7672 else if (lkups[i].type == ICE_IPV4_OFOS &&
7673 lkups[i].h_u.ipv4_hdr.protocol ==
7674 ICE_IPV4_NVGRE_PROTO_ID &&
7675 lkups[i].m_u.ipv4_hdr.protocol ==
/* PPPoE carrying PPP protocol 0x0057 -> IPv6-over-PPPoE hint. */
7678 else if (lkups[i].type == ICE_PPPOE &&
7679 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7680 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7681 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7684 else if (lkups[i].type == ICE_ETYPE_OL &&
7685 lkups[i].h_u.ethertype.ethtype_id ==
7686 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7687 lkups[i].m_u.ethertype.ethtype_id ==
7690 else if (lkups[i].type == ICE_IPV4_IL &&
7691 lkups[i].h_u.ipv4_hdr.protocol ==
7693 lkups[i].m_u.ipv4_hdr.protocol ==
/* QinQ (double VLAN) templates, IPv6 or IPv4 flavor. */
7698 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7699 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7700 *pkt = dummy_qinq_ipv6_pkt;
7701 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7702 *offsets = dummy_qinq_ipv6_packet_offsets;
7704 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7705 tun_type == ICE_NON_TUN_QINQ) {
7706 *pkt = dummy_qinq_ipv4_pkt;
7707 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7708 *offsets = dummy_qinq_ipv4_packet_offsets;
7712 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7713 *pkt = dummy_qinq_pppoe_ipv6_packet;
7714 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7715 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7717 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7718 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7719 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7720 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7722 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7723 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7724 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7725 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7726 *offsets = dummy_qinq_pppoe_packet_offsets;
/* GTP-U templates (with and without payload). */
7730 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7731 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7732 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7733 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7735 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7736 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7737 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7738 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7740 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7741 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7742 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7743 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7745 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7746 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7747 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7748 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7750 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7751 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7752 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7753 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7755 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7756 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7757 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7758 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
/* IPsec / NAT-T / L2TPv3 templates. */
7762 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7763 *pkt = dummy_ipv4_esp_pkt;
7764 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7765 *offsets = dummy_ipv4_esp_packet_offsets;
7769 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7770 *pkt = dummy_ipv6_esp_pkt;
7771 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7772 *offsets = dummy_ipv6_esp_packet_offsets;
7776 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7777 *pkt = dummy_ipv4_ah_pkt;
7778 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7779 *offsets = dummy_ipv4_ah_packet_offsets;
7783 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7784 *pkt = dummy_ipv6_ah_pkt;
7785 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7786 *offsets = dummy_ipv6_ah_packet_offsets;
7790 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7791 *pkt = dummy_ipv4_nat_pkt;
7792 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7793 *offsets = dummy_ipv4_nat_packet_offsets;
7797 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7798 *pkt = dummy_ipv6_nat_pkt;
7799 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7800 *offsets = dummy_ipv6_nat_packet_offsets;
7804 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7805 *pkt = dummy_ipv4_l2tpv3_pkt;
7806 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7807 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7811 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7812 *pkt = dummy_ipv6_l2tpv3_pkt;
7813 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7814 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7818 if (tun_type == ICE_SW_TUN_GTP) {
7819 *pkt = dummy_udp_gtp_packet;
7820 *pkt_len = sizeof(dummy_udp_gtp_packet);
7821 *offsets = dummy_udp_gtp_packet_offsets;
/* PPPoE templates; the ipv6 hint selects the IPv6 payload variant. */
7825 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7826 *pkt = dummy_pppoe_ipv6_packet;
7827 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7828 *offsets = dummy_pppoe_packet_offsets;
7830 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7831 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7832 *pkt = dummy_pppoe_ipv4_packet;
7833 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7834 *offsets = dummy_pppoe_packet_offsets;
7838 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7839 *pkt = dummy_pppoe_ipv4_packet;
7840 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7841 *offsets = dummy_pppoe_packet_ipv4_offsets;
7845 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7846 *pkt = dummy_pppoe_ipv4_tcp_packet;
7847 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7848 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7852 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7853 *pkt = dummy_pppoe_ipv4_udp_packet;
7854 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7855 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7859 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7860 *pkt = dummy_pppoe_ipv6_packet;
7861 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7862 *offsets = dummy_pppoe_packet_ipv6_offsets;
7866 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7867 *pkt = dummy_pppoe_ipv6_tcp_packet;
7868 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7869 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7873 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7874 *pkt = dummy_pppoe_ipv6_udp_packet;
7875 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7876 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
/* Plain (non-tunneled) L4-specific templates. */
7880 if (tun_type == ICE_SW_IPV4_TCP) {
7881 *pkt = dummy_tcp_packet;
7882 *pkt_len = sizeof(dummy_tcp_packet);
7883 *offsets = dummy_tcp_packet_offsets;
7887 if (tun_type == ICE_SW_IPV4_UDP) {
7888 *pkt = dummy_udp_packet;
7889 *pkt_len = sizeof(dummy_udp_packet);
7890 *offsets = dummy_udp_packet_offsets;
7894 if (tun_type == ICE_SW_IPV6_TCP) {
7895 *pkt = dummy_tcp_ipv6_packet;
7896 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7897 *offsets = dummy_tcp_ipv6_packet_offsets;
7901 if (tun_type == ICE_SW_IPV6_UDP) {
7902 *pkt = dummy_udp_ipv6_packet;
7903 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7904 *offsets = dummy_udp_ipv6_packet_offsets;
7908 if (tun_type == ICE_ALL_TUNNELS) {
7909 *pkt = dummy_gre_udp_packet;
7910 *pkt_len = sizeof(dummy_gre_udp_packet);
7911 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE/GRE tunnels: inner TCP vs inner UDP template. */
7915 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7917 *pkt = dummy_gre_tcp_packet;
7918 *pkt_len = sizeof(dummy_gre_tcp_packet);
7919 *offsets = dummy_gre_tcp_packet_offsets;
7923 *pkt = dummy_gre_udp_packet;
7924 *pkt_len = sizeof(dummy_gre_udp_packet);
7925 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-encapsulated tunnels (VXLAN/GENEVE/GPE): inner TCP vs UDP. */
7929 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7930 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7931 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7932 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7934 *pkt = dummy_udp_tun_tcp_packet;
7935 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7936 *offsets = dummy_udp_tun_tcp_packet_offsets;
7940 *pkt = dummy_udp_tun_udp_packet;
7941 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7942 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Fallback: choose plain TCP/UDP template by the udp/ipv6/vlan hints. */
7948 *pkt = dummy_vlan_udp_packet;
7949 *pkt_len = sizeof(dummy_vlan_udp_packet);
7950 *offsets = dummy_vlan_udp_packet_offsets;
7953 *pkt = dummy_udp_packet;
7954 *pkt_len = sizeof(dummy_udp_packet);
7955 *offsets = dummy_udp_packet_offsets;
7957 } else if (udp && ipv6) {
7959 *pkt = dummy_vlan_udp_ipv6_packet;
7960 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7961 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7964 *pkt = dummy_udp_ipv6_packet;
7965 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7966 *offsets = dummy_udp_ipv6_packet_offsets;
7968 } else if ((tcp && ipv6) || ipv6) {
7970 *pkt = dummy_vlan_tcp_ipv6_packet;
7971 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7972 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7975 *pkt = dummy_tcp_ipv6_packet;
7976 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7977 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default template is plain TCP (VLAN-tagged if a VLAN lookup exists). */
7982 *pkt = dummy_vlan_tcp_packet;
7983 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7984 *offsets = dummy_vlan_tcp_packet_offsets;
7986 *pkt = dummy_tcp_packet;
7987 *pkt_len = sizeof(dummy_tcp_packet);
7988 *offsets = dummy_tcp_packet_offsets;
7993 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7995 * @lkups: lookup elements or match criteria for the advanced recipe, one
7996 * structure per protocol header
7997 * @lkups_cnt: number of protocols
7998 * @s_rule: stores rule information from the match criteria
7999 * @dummy_pkt: dummy packet to fill according to filter match criteria
8000 * @pkt_len: packet length of dummy packet
8001 * @offsets: offset info for the dummy packet
8003 static enum ice_status
8004 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8005 struct ice_aqc_sw_rules_elem *s_rule,
8006 const u8 *dummy_pkt, u16 pkt_len,
8007 const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): several interior lines (local declarations, case labels,
 * closing braces) are missing from this extract — confirm against the full
 * source before editing this body.
 */
8012 /* Start with a packet with a pre-defined/dummy content. Then, fill
8013 * in the header values to be looked up or matched.
8015 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8017 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8019 for (i = 0; i < lkups_cnt; i++) {
8020 enum ice_protocol_type type;
8021 u16 offset = 0, len = 0, j;
8024 /* find the start of this layer; it should be found since this
8025 * was already checked when searching for the dummy packet
8027 type = lkups[i].type;
8028 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8029 if (type == offsets[j].type) {
8030 offset = offsets[j].offset;
8035 /* this should never happen in a correct calling sequence */
8037 return ICE_ERR_PARAM;
/* Each protocol's header length determines how many 16-bit words of the
 * caller's value/mask pair are merged into the dummy packet below.
 */
8039 switch (lkups[i].type) {
8042 len = sizeof(struct ice_ether_hdr);
8045 len = sizeof(struct ice_ethtype_hdr);
8049 len = sizeof(struct ice_vlan_hdr);
8053 len = sizeof(struct ice_ipv4_hdr);
8057 len = sizeof(struct ice_ipv6_hdr);
8062 len = sizeof(struct ice_l4_hdr);
8065 len = sizeof(struct ice_sctp_hdr);
8068 len = sizeof(struct ice_nvgre);
8073 len = sizeof(struct ice_udp_tnl_hdr);
8077 case ICE_GTP_NO_PAY:
8078 len = sizeof(struct ice_udp_gtp_hdr);
8081 len = sizeof(struct ice_pppoe_hdr);
8084 len = sizeof(struct ice_esp_hdr);
8087 len = sizeof(struct ice_nat_t_hdr);
8090 len = sizeof(struct ice_ah_hdr);
8093 len = sizeof(struct ice_l2tpv3_sess_hdr);
8096 return ICE_ERR_PARAM;
8099 /* the length should be a word multiple */
8100 if (len % ICE_BYTES_PER_WORD)
8103 /* We have the offset to the header start, the length, the
8104 * caller's header values and mask. Use this information to
8105 * copy the data into the dummy packet appropriately based on
8106 * the mask. Note that we need to only write the bits as
8107 * indicated by the mask to make sure we don't improperly write
8108 * over any significant packet data.
/* Mask-merge per u16 word: keep dummy bits where mask is 0, take the
 * caller's header bits where mask is 1. Skips words with an all-zero mask.
 */
8110 for (j = 0; j < len / sizeof(u16); j++)
8111 if (((u16 *)&lkups[i].m_u)[j])
8112 ((u16 *)(pkt + offset))[j] =
8113 (((u16 *)(pkt + offset))[j] &
8114 ~((u16 *)&lkups[i].m_u)[j]) |
8115 (((u16 *)&lkups[i].h_u)[j] &
8116 ((u16 *)&lkups[i].m_u)[j]);
/* Record the finished header length in the switch rule (little-endian). */
8119 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8125 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8126 * @hw: pointer to the hardware structure
8127 * @tun_type: tunnel type
8128 * @pkt: dummy packet to fill in
8129 * @offsets: offset info for the dummy packet
8131 static enum ice_status
8132 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8133 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* NOTE(review): the switch statement head, error returns, and closing braces
 * are missing from this extract — verify against the full source.
 */
8138 case ICE_SW_TUN_AND_NON_TUN:
8139 case ICE_SW_TUN_VXLAN_GPE:
8140 case ICE_SW_TUN_VXLAN:
8141 case ICE_SW_TUN_VXLAN_VLAN:
8142 case ICE_SW_TUN_UDP:
/* VXLAN-family tunnels all use the open VXLAN UDP port */
8143 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8147 case ICE_SW_TUN_GENEVE:
8148 case ICE_SW_TUN_GENEVE_VLAN:
8149 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8154 /* Nothing needs to be done for this tunnel type */
8158 /* Find the outer UDP protocol header and insert the port number */
8159 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8160 if (offsets[i].type == ICE_UDP_OF) {
8161 struct ice_l4_hdr *hdr;
8164 offset = offsets[i].offset;
8165 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* destination port is carried big-endian on the wire */
8166 hdr->dst_port = CPU_TO_BE16(open_port);
8176 * ice_find_adv_rule_entry - Search a rule entry
8177 * @hw: pointer to the hardware structure
8178 * @lkups: lookup elements or match criteria for the advanced recipe, one
8179 * structure per protocol header
8180 * @lkups_cnt: number of protocols
8181 * @recp_id: recipe ID for which we are finding the rule
8182 * @rinfo: other information regarding the rule e.g. priority and action info
8184 * Helper function to search for a given advanced rule entry
8185 * Returns pointer to entry storing the rule if found
8187 static struct ice_adv_fltr_mgmt_list_entry *
8188 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8189 u16 lkups_cnt, u16 recp_id,
8190 struct ice_adv_rule_info *rinfo)
8192 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8193 struct ice_switch_info *sw = hw->switch_info;
/* Walk all filter rules bound to this recipe looking for one whose
 * lookup list and action info match the caller's exactly.
 */
8196 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8197 ice_adv_fltr_mgmt_list_entry, list_entry) {
8198 bool lkups_matched = true;
/* cheap count check first; only then compare element-by-element */
8200 if (lkups_cnt != list_itr->lkups_cnt)
8202 for (i = 0; i < list_itr->lkups_cnt; i++)
8203 if (memcmp(&list_itr->lkups[i], &lkups[i],
8205 lkups_matched = false;
8208 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8209 rinfo->tun_type == list_itr->rule_info.tun_type &&
8217 * ice_adv_add_update_vsi_list
8218 * @hw: pointer to the hardware structure
8219 * @m_entry: pointer to current adv filter management list entry
8220 * @cur_fltr: filter information from the book keeping entry
8221 * @new_fltr: filter information with the new VSI to be added
8223 * Call AQ command to add or update previously created VSI list with new VSI.
8225 * Helper function to do book keeping associated with adding filter information
8226 * The algorithm to do the book keeping is described below :
8227 * When a VSI needs to subscribe to a given advanced filter
8228 * if only one VSI has been added till now
8229 * Allocate a new VSI list and add two VSIs
8230 * to this list using switch rule command
8231 * Update the previously created switch rule with the
8232 * newly created VSI list ID
8233 * if a VSI list was previously created
8234 * Add the new VSI to the previously created VSI list set
8235 * using the update switch rule command
8237 static enum ice_status
8238 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8239 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8240 struct ice_adv_rule_info *cur_fltr,
8241 struct ice_adv_rule_info *new_fltr)
8243 enum ice_status status;
8244 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be aggregated into a VSI list */
8246 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8247 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8248 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8249 return ICE_ERR_NOT_IMPL;
/* Mixing a queue action with an existing VSI-forward action is unsupported */
8251 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8252 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8253 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8254 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8255 return ICE_ERR_NOT_IMPL;
8257 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8258 /* Only one entry existed in the mapping and it was not already
8259 * a part of a VSI list. So, create a VSI list with the old and
8262 struct ice_fltr_info tmp_fltr;
8263 u16 vsi_handle_arr[2];
8265 /* A rule already exists with the new VSI being added */
8266 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8267 new_fltr->sw_act.fwd_id.hw_vsi_id)
8268 return ICE_ERR_ALREADY_EXISTS;
8270 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8271 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8272 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8278 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8279 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8280 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8281 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8282 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8283 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8285 /* Update the previous switch rule of "forward to VSI" to
8288 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the entry now forwards to the new VSI list */
8292 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8293 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8294 m_entry->vsi_list_info =
8295 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8298 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8300 if (!m_entry->vsi_list_info)
8303 /* A rule already exists with the new VSI being added */
8304 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8307 /* Update the previously created VSI list set with
8308 * the new VSI ID passed in
8310 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8312 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8314 ice_aqc_opc_update_sw_rules,
8316 /* update VSI list mapping info with new VSI ID */
8318 ice_set_bit(vsi_handle,
8319 m_entry->vsi_list_info->vsi_map);
8322 m_entry->vsi_count++;
8327 * ice_add_adv_rule - helper function to create an advanced switch rule
8328 * @hw: pointer to the hardware structure
8329 * @lkups: information on the words that needs to be looked up. All words
8330 * together makes one recipe
8331 * @lkups_cnt: num of entries in the lkups array
8332 * @rinfo: other information related to the rule that needs to be programmed
8333 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8334 * ignored in case of error.
8336 * This function can program only 1 rule at a time. The lkups is used to
8337 * describe all the words that form the "lookup" portion of the recipe.
8338 * These words can span multiple protocols. Callers to this function need to
8339 * pass in a list of protocol headers with lookup information along and mask
8340 * that determines which words are valid from the given protocol header.
8341 * rinfo describes other information related to this rule such as forwarding
8342 * IDs, priority of this rule, etc.
8345 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8346 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8347 struct ice_rule_query_data *added_entry)
8349 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8350 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8351 const struct ice_dummy_pkt_offsets *pkt_offsets;
8352 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8353 struct LIST_HEAD_TYPE *rule_head;
8354 struct ice_switch_info *sw;
8355 enum ice_status status;
8356 const u8 *pkt = NULL;
/* NOTE(review): extraction dropped several lines here (remaining locals,
 * return type on the definition, some conditions/braces) — consult the full
 * source before modifying.
 */
8362 /* Initialize profile to result index bitmap */
8363 if (!hw->switch_info->prof_res_bm_init) {
8364 hw->switch_info->prof_res_bm_init = 1;
8365 ice_init_prof_result_bm(hw);
8368 prof_rule = ice_is_prof_rule(rinfo->tun_type);
/* A non-profile rule with no lookups has nothing to match on */
8369 if (!prof_rule && !lkups_cnt)
8370 return ICE_ERR_PARAM;
8372 /* get # of words we need to match */
8374 for (i = 0; i < lkups_cnt; i++) {
8377 ptr = (u16 *)&lkups[i].m_u;
8378 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8384 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8385 return ICE_ERR_PARAM;
8387 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8388 return ICE_ERR_PARAM;
8391 /* make sure that we can locate a dummy packet */
8392 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8395 status = ICE_ERR_PARAM;
8396 goto err_ice_add_adv_rule;
/* only these four forwarding actions are valid for an advanced rule */
8399 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8400 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8401 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8402 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8405 vsi_handle = rinfo->sw_act.vsi_handle;
8406 if (!ice_is_vsi_valid(hw, vsi_handle))
8407 return ICE_ERR_PARAM;
8409 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8410 rinfo->sw_act.fwd_id.hw_vsi_id =
8411 ice_get_hw_vsi_num(hw, vsi_handle);
8412 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8413 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8415 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8418 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8420 /* we have to add VSI to VSI_LIST and increment vsi_count.
8421 * Also Update VSI list so that we can change forwarding rule
8422 * if the rule already exists, we will check if it exists with
8423 * same vsi_id, if not then add it to the VSI list if it already
8424 * exists if not then create a VSI list and add the existing VSI
8425 * ID and the new VSI ID to the list
8426 * We will add that VSI to the list
8428 status = ice_adv_add_update_vsi_list(hw, m_entry,
8429 &m_entry->rule_info,
8432 added_entry->rid = rid;
8433 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8434 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No matching entry — build and program a brand-new switch rule */
8438 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8439 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8441 return ICE_ERR_NO_MEMORY;
8442 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the forwarding action into the single-action word */
8443 switch (rinfo->sw_act.fltr_act) {
8444 case ICE_FWD_TO_VSI:
8445 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8446 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8447 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8450 act |= ICE_SINGLE_ACT_TO_Q;
8451 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8452 ICE_SINGLE_ACT_Q_INDEX_M;
8454 case ICE_FWD_TO_QGRP:
/* queue region size must be a power of two; store its log2 */
8455 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8456 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8457 act |= ICE_SINGLE_ACT_TO_Q;
8458 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8459 ICE_SINGLE_ACT_Q_INDEX_M;
8460 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8461 ICE_SINGLE_ACT_Q_REGION_M;
8463 case ICE_DROP_PACKET:
8464 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8465 ICE_SINGLE_ACT_VALID_BIT;
8468 status = ICE_ERR_CFG;
8469 goto err_ice_add_adv_rule;
8472 /* set the rule LOOKUP type based on caller specified 'RX'
8473 * instead of hardcoding it to be either LOOKUP_TX/RX
8475 * for 'RX' set the source to be the port number
8476 * for 'TX' set the source to be the source HW VSI number (determined
8480 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8481 s_rule->pdata.lkup_tx_rx.src =
8482 CPU_TO_LE16(hw->port_info->lport);
8484 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8485 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8488 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8489 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8491 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8492 pkt_len, pkt_offsets);
8494 goto err_ice_add_adv_rule;
/* tunneled rules additionally need the open UDP tunnel port patched in */
8496 if (rinfo->tun_type != ICE_NON_TUN &&
8497 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8498 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8499 s_rule->pdata.lkup_tx_rx.hdr,
8502 goto err_ice_add_adv_rule;
8505 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8506 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8509 goto err_ice_add_adv_rule;
8510 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8511 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8513 status = ICE_ERR_NO_MEMORY;
8514 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later match/replay */
8517 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8518 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8519 ICE_NONDMA_TO_NONDMA);
8520 if (!adv_fltr->lkups && !prof_rule) {
8521 status = ICE_ERR_NO_MEMORY;
8522 goto err_ice_add_adv_rule;
8525 adv_fltr->lkups_cnt = lkups_cnt;
8526 adv_fltr->rule_info = *rinfo;
8527 adv_fltr->rule_info.fltr_rule_id =
8528 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8529 sw = hw->switch_info;
8530 sw->recp_list[rid].adv_rule = true;
8531 rule_head = &sw->recp_list[rid].filt_rules;
8533 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8534 adv_fltr->vsi_count = 1;
8536 /* Add rule entry to book keeping list */
8537 LIST_ADD(&adv_fltr->list_entry, rule_head);
8539 added_entry->rid = rid;
8540 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8541 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* shared cleanup: free partial book-keeping on failure, AQ buffer always */
8543 err_ice_add_adv_rule:
8544 if (status && adv_fltr) {
8545 ice_free(hw, adv_fltr->lkups);
8546 ice_free(hw, adv_fltr);
8549 ice_free(hw, s_rule);
8555 * ice_adv_rem_update_vsi_list
8556 * @hw: pointer to the hardware structure
8557 * @vsi_handle: VSI handle of the VSI to remove
8558 * @fm_list: filter management entry for which the VSI list management needs to
8561 static enum ice_status
8562 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8563 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8565 struct ice_vsi_list_map_info *vsi_list_info;
8566 enum ice_sw_lkup_type lkup_type;
8567 enum ice_status status;
/* only meaningful when the rule currently forwards to a non-empty VSI list */
8570 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8571 fm_list->vsi_count == 0)
8572 return ICE_ERR_PARAM;
8574 /* A rule with the VSI being removed does not exist */
8575 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8576 return ICE_ERR_DOES_NOT_EXIST;
8578 lkup_type = ICE_SW_LKUP_LAST;
8579 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove (last arg true) the VSI from the hardware VSI list */
8580 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8581 ice_aqc_opc_update_sw_rules,
8586 fm_list->vsi_count--;
8587 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8588 vsi_list_info = fm_list->vsi_list_info;
/* One VSI left: collapse the VSI list back to a plain forward-to-VSI rule */
8589 if (fm_list->vsi_count == 1) {
8590 struct ice_fltr_info tmp_fltr;
8593 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8595 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8596 return ICE_ERR_OUT_OF_RANGE;
8598 /* Make sure VSI list is empty before removing it below */
8599 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8601 ice_aqc_opc_update_sw_rules,
8606 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8607 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8608 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8609 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8610 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8611 tmp_fltr.fwd_id.hw_vsi_id =
8612 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8613 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8614 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8615 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8617 /* Update the previous switch rule of "MAC forward to VSI" to
8618 * "MAC fwd to VSI list"
8620 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8622 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8623 tmp_fltr.fwd_id.hw_vsi_id, status);
8626 fm_list->vsi_list_info->ref_cnt--;
8628 /* Remove the VSI list since it is no longer used */
8629 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8631 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8632 vsi_list_id, status);
/* drop the software map entry so it cannot be reused */
8636 LIST_DEL(&vsi_list_info->list_entry);
8637 ice_free(hw, vsi_list_info);
8638 fm_list->vsi_list_info = NULL;
8645 * ice_rem_adv_rule - removes existing advanced switch rule
8646 * @hw: pointer to the hardware structure
8647 * @lkups: information on the words that needs to be looked up. All words
8648 * together makes one recipe
8649 * @lkups_cnt: num of entries in the lkups array
8650 * @rinfo: pointer to the rule information for the rule to remove
8652 * This function can be used to remove 1 rule at a time. The lkups is
8653 * used to describe all the words that forms the "lookup" portion of the
8654 * rule. These words can span multiple protocols. Callers to this function
8655 * need to pass in a list of protocol headers with lookup information along
8656 * and mask that determines which words are valid from the given protocol
8657 * header. rinfo describes other information related to this rule such as
8658 * forwarding IDs, priority of this rule, etc.
8661 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8662 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8664 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8665 struct ice_prot_lkup_ext lkup_exts;
8666 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8667 enum ice_status status = ICE_SUCCESS;
8668 bool remove_rule = false;
8669 u16 i, rid, vsi_handle;
/* Re-derive the recipe's extraction words from the caller's lookups so the
 * matching recipe ID can be located.
 */
8671 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8672 for (i = 0; i < lkups_cnt; i++) {
8675 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8678 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8683 /* Create any special protocol/offset pairs, such as looking at tunnel
8684 * bits by extracting metadata
8686 status = ice_add_special_words(rinfo, &lkup_exts);
8690 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8691 /* If did not find a recipe that match the existing criteria */
8692 if (rid == ICE_MAX_NUM_RECIPES)
8693 return ICE_ERR_PARAM;
8695 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8696 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8697 /* the rule is already removed */
8700 ice_acquire_lock(rule_lock);
/* Decide between removing the whole rule and just detaching one VSI from
 * the rule's VSI list.
 */
8701 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8703 } else if (list_elem->vsi_count > 1) {
8704 remove_rule = false;
8705 vsi_handle = rinfo->sw_act.vsi_handle;
8706 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8708 vsi_handle = rinfo->sw_act.vsi_handle;
8709 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8711 ice_release_lock(rule_lock);
8714 if (list_elem->vsi_count == 0)
8717 ice_release_lock(rule_lock);
8719 struct ice_aqc_sw_rules_elem *s_rule;
/* Program the rule removal through the admin queue */
8722 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8723 s_rule = (struct ice_aqc_sw_rules_elem *)
8724 ice_malloc(hw, rule_buf_sz);
8726 return ICE_ERR_NO_MEMORY;
8727 s_rule->pdata.lkup_tx_rx.act = 0;
8728 s_rule->pdata.lkup_tx_rx.index =
8729 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8730 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8731 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8733 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is tolerated: the hardware rule is already gone, so the
 * software book-keeping is cleaned up regardless.
 */
8734 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8735 struct ice_switch_info *sw = hw->switch_info;
8737 ice_acquire_lock(rule_lock);
8738 LIST_DEL(&list_elem->list_entry);
8739 ice_free(hw, list_elem->lkups);
8740 ice_free(hw, list_elem);
8741 ice_release_lock(rule_lock);
8742 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8743 sw->recp_list[rid].adv_rule = false;
8745 ice_free(hw, s_rule);
8751 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8752 * @hw: pointer to the hardware structure
8753 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8755 * This function is used to remove 1 rule at a time. The removal is based on
8756 * the remove_entry parameter. This function will remove rule for a given
8757 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8760 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8761 struct ice_rule_query_data *remove_entry)
8763 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8764 struct LIST_HEAD_TYPE *list_head;
8765 struct ice_adv_rule_info rinfo;
8766 struct ice_switch_info *sw;
8768 sw = hw->switch_info;
/* bail out early if the recipe was never created */
8769 if (!sw->recp_list[remove_entry->rid].recp_created)
8770 return ICE_ERR_PARAM;
8771 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* linear scan of the recipe's rule list for a matching rule ID */
8772 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8774 if (list_itr->rule_info.fltr_rule_id ==
8775 remove_entry->rule_id) {
/* copy the stored rule info but target the caller's VSI handle */
8776 rinfo = list_itr->rule_info;
8777 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8778 return ice_rem_adv_rule(hw, list_itr->lkups,
8779 list_itr->lkups_cnt, &rinfo);
8782 /* either list is empty or unable to find rule */
8783 return ICE_ERR_DOES_NOT_EXIST;
8787 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
8789 * @hw: pointer to the hardware structure
8790 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8792 * This function is used to remove all the rules for a given VSI and as soon
8793 * as removing a rule fails, it will return immediately with the error code,
8794 * else it will return ICE_SUCCESS
8796 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8798 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8799 struct ice_vsi_list_map_info *map_info;
8800 struct LIST_HEAD_TYPE *list_head;
8801 struct ice_adv_rule_info rinfo;
8802 struct ice_switch_info *sw;
8803 enum ice_status status;
8806 sw = hw->switch_info;
/* walk every recipe that has advanced rules attached */
8807 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8808 if (!sw->recp_list[rid].recp_created)
8810 if (!sw->recp_list[rid].adv_rule)
8813 list_head = &sw->recp_list[rid].filt_rules;
/* SAFE iteration: ice_rem_adv_rule may unlink the current entry */
8814 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8815 ice_adv_fltr_mgmt_list_entry,
8817 rinfo = list_itr->rule_info;
/* for VSI-list rules, check membership via the VSI bitmap */
8819 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8820 map_info = list_itr->vsi_list_info;
8824 if (!ice_is_bit_set(map_info->vsi_map,
8827 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8831 rinfo.sw_act.vsi_handle = vsi_handle;
8832 status = ice_rem_adv_rule(hw, list_itr->lkups,
8833 list_itr->lkups_cnt, &rinfo);
8843 * ice_replay_fltr - Replay all the filters stored by a specific list head
8844 * @hw: pointer to the hardware structure
8845 * @list_head: list for which filters needs to be replayed
8846 * @recp_id: Recipe ID for which rules need to be replayed
8848 static enum ice_status
8849 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8851 struct ice_fltr_mgmt_list_entry *itr;
8852 enum ice_status status = ICE_SUCCESS;
8853 struct ice_sw_recipe *recp_list;
8854 u8 lport = hw->port_info->lport;
8855 struct LIST_HEAD_TYPE l_head;
8857 if (LIST_EMPTY(list_head))
8860 recp_list = &hw->switch_info->recp_list[recp_id];
8861 /* Move entries from the given list_head to a temporary l_head so that
8862 * they can be replayed. Otherwise when trying to re-add the same
8863 * filter, the function will return already exists
8865 LIST_REPLACE_INIT(list_head, &l_head);
8867 /* Mark the given list_head empty by reinitializing it so filters
8868 * could be added again by *handler
8870 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8872 struct ice_fltr_list_entry f_entry;
8875 f_entry.fltr_info = itr->fltr_info;
/* single-VSI, non-VLAN filters can be re-added directly */
8876 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8877 status = ice_add_rule_internal(hw, recp_list, lport,
8879 if (status != ICE_SUCCESS)
8884 /* Add a filter per VSI separately */
8885 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8887 if (!ice_is_vsi_valid(hw, vsi_handle))
/* clear the bit so replay logic re-adds this VSI from scratch */
8890 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8891 f_entry.fltr_info.vsi_handle = vsi_handle;
8892 f_entry.fltr_info.fwd_id.hw_vsi_id =
8893 ice_get_hw_vsi_num(hw, vsi_handle);
8894 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8895 if (recp_id == ICE_SW_LKUP_VLAN)
8896 status = ice_add_vlan_internal(hw, recp_list,
8899 status = ice_add_rule_internal(hw, recp_list,
8902 if (status != ICE_SUCCESS)
8907 /* Clear the filter management list */
8908 ice_rem_sw_rule_info(hw, &l_head);
8913 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8914 * @hw: pointer to the hardware structure
8916 * NOTE: This function does not clean up partially added filters on error.
8917 * It is up to caller of the function to issue a reset or fail early.
8919 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8921 struct ice_switch_info *sw = hw->switch_info;
8922 enum ice_status status = ICE_SUCCESS;
/* replay every recipe's rule list; stop at the first failure */
8925 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8926 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8928 status = ice_replay_fltr(hw, i, head);
8929 if (status != ICE_SUCCESS)
8936 * ice_replay_vsi_fltr - Replay filters for requested VSI
8937 * @hw: pointer to the hardware structure
8938 * @pi: pointer to port information structure
8939 * @sw: pointer to switch info struct for which function replays filters
8940 * @vsi_handle: driver VSI handle
8941 * @recp_id: Recipe ID for which rules need to be replayed
8942 * @list_head: list for which filters need to be replayed
8944 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8945 * It is required to pass valid VSI handle.
8947 static enum ice_status
8948 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8949 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8950 struct LIST_HEAD_TYPE *list_head)
8952 struct ice_fltr_mgmt_list_entry *itr;
8953 enum ice_status status = ICE_SUCCESS;
8954 struct ice_sw_recipe *recp_list;
8957 if (LIST_EMPTY(list_head))
8959 recp_list = &sw->recp_list[recp_id];
8960 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8962 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8964 struct ice_fltr_list_entry f_entry;
8966 f_entry.fltr_info = itr->fltr_info;
/* fast path: filter owned directly by this VSI, not a VSI list */
8967 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8968 itr->fltr_info.vsi_handle == vsi_handle) {
8969 /* update the src in case it is VSI num */
8970 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8971 f_entry.fltr_info.src = hw_vsi_id;
8972 status = ice_add_rule_internal(hw, recp_list,
8975 if (status != ICE_SUCCESS)
/* otherwise only replay if this VSI is in the filter's VSI list */
8979 if (!itr->vsi_list_info ||
8980 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8982 /* Clearing it so that the logic can add it back */
8983 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
8984 f_entry.fltr_info.vsi_handle = vsi_handle;
8985 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8986 /* update the src in case it is VSI num */
8987 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8988 f_entry.fltr_info.src = hw_vsi_id;
8989 if (recp_id == ICE_SW_LKUP_VLAN)
8990 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8992 status = ice_add_rule_internal(hw, recp_list,
8995 if (status != ICE_SUCCESS)
9003 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9004 * @hw: pointer to the hardware structure
9005 * @vsi_handle: driver VSI handle
9006 * @list_head: list for which filters need to be replayed
9008 * Replay the advanced rule for the given VSI.
9010 static enum ice_status
9011 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9012 struct LIST_HEAD_TYPE *list_head)
9014 struct ice_rule_query_data added_entry = { 0 };
9015 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9016 enum ice_status status = ICE_SUCCESS;
9018 if (LIST_EMPTY(list_head))
9020 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9022 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9023 u16 lk_cnt = adv_fltr->lkups_cnt;
/* only replay rules that target the requested VSI */
9025 if (vsi_handle != rinfo->sw_act.vsi_handle)
9027 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9036 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9037 * @hw: pointer to the hardware structure
9038 * @pi: pointer to port information structure
9039 * @vsi_handle: driver VSI handle
9041 * Replays filters for requested VSI via vsi_handle.
9044 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9047 struct ice_switch_info *sw = hw->switch_info;
9048 enum ice_status status;
9051 /* Update the recipes that were created */
9052 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9053 struct LIST_HEAD_TYPE *head;
9055 head = &sw->recp_list[i].filt_replay_rules;
/* dispatch on rule type: basic filters vs advanced rules */
9056 if (!sw->recp_list[i].adv_rule)
9057 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9060 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9061 if (status != ICE_SUCCESS)
9069 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9070 * @hw: pointer to the HW struct
9071 * @sw: pointer to switch info struct for which function removes filters
9073 * Deletes the filter replay rules for given switch
9075 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9082 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9083 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9084 struct LIST_HEAD_TYPE *l_head;
9086 l_head = &sw->recp_list[i].filt_replay_rules;
/* basic and advanced rules use different tear-down helpers */
9087 if (!sw->recp_list[i].adv_rule)
9088 ice_rem_sw_rule_info(hw, l_head);
9090 ice_rem_adv_rule_info(hw, l_head);
9096 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9097 * @hw: pointer to the HW struct
9099 * Deletes the filter replay rules.
9101 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9103 ice_rm_sw_replay_rule_info(hw, hw->switch_info);