1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV6_ETHER_ID 0x86DD
14 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
15 #define ICE_PPP_IPV6_PROTO_ID 0x0057
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_GTPU_PROFILE 24
18 #define ICE_ETH_P_8021Q 0x8100
19 #define ICE_MPLS_ETHER_ID 0x8847
21 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
22 * struct to configure any switch filter rules.
23 * {DA (6 bytes), SA(6 bytes),
24 * Ether type (2 bytes for header without VLAN tag) OR
25 * VLAN tag (4 bytes for header with VLAN tag) }
27 * Word on Hardcoded values
28 * byte 0 = 0x2: to identify it as locally administered DA MAC
29 * byte 6 = 0x2: to identify it as locally administered SA MAC
30 * byte 12 = 0x81 & byte 13 = 0x00:
31 * In case of VLAN filter first two bytes defines ether type (0x8100)
32 * and remaining two bytes are placeholder for programming a given VLAN ID
33 * In case of Ether type filter it is treated as header without VLAN tag
34 * and byte 12 and 13 is used to program a given Ether type instead
36 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
40 struct ice_dummy_pkt_offsets {
41 enum ice_protocol_type type;
42 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
45 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
48 { ICE_IPV4_OFOS, 14 },
53 { ICE_PROTOCOL_LAST, 0 },
56 static const u8 dummy_gre_tcp_packet[] = {
57 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
58 0x00, 0x00, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00,
61 0x08, 0x00, /* ICE_ETYPE_OL 12 */
63 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x2F, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00,
67 0x00, 0x00, 0x00, 0x00,
69 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
70 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
77 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x06, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
86 0x50, 0x02, 0x20, 0x00,
87 0x00, 0x00, 0x00, 0x00
90 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
93 { ICE_IPV4_OFOS, 14 },
98 { ICE_PROTOCOL_LAST, 0 },
101 static const u8 dummy_gre_udp_packet[] = {
102 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
103 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00,
106 0x08, 0x00, /* ICE_ETYPE_OL 12 */
108 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x2F, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00,
114 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
115 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
122 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x11, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
129 0x00, 0x08, 0x00, 0x00,
132 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
134 { ICE_ETYPE_OL, 12 },
135 { ICE_IPV4_OFOS, 14 },
139 { ICE_VXLAN_GPE, 42 },
143 { ICE_PROTOCOL_LAST, 0 },
146 static const u8 dummy_udp_tun_tcp_packet[] = {
147 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
148 0x00, 0x00, 0x00, 0x00,
149 0x00, 0x00, 0x00, 0x00,
151 0x08, 0x00, /* ICE_ETYPE_OL 12 */
153 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
154 0x00, 0x01, 0x00, 0x00,
155 0x40, 0x11, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
160 0x00, 0x46, 0x00, 0x00,
162 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
163 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
166 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00,
170 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
171 0x00, 0x01, 0x00, 0x00,
172 0x40, 0x06, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179 0x50, 0x02, 0x20, 0x00,
180 0x00, 0x00, 0x00, 0x00
183 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
185 { ICE_ETYPE_OL, 12 },
186 { ICE_IPV4_OFOS, 14 },
190 { ICE_VXLAN_GPE, 42 },
193 { ICE_UDP_ILOS, 84 },
194 { ICE_PROTOCOL_LAST, 0 },
197 static const u8 dummy_udp_tun_udp_packet[] = {
198 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
199 0x00, 0x00, 0x00, 0x00,
200 0x00, 0x00, 0x00, 0x00,
202 0x08, 0x00, /* ICE_ETYPE_OL 12 */
204 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
211 0x00, 0x3a, 0x00, 0x00,
213 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
214 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
217 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00,
221 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
222 0x00, 0x01, 0x00, 0x00,
223 0x00, 0x11, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
228 0x00, 0x08, 0x00, 0x00,
231 /* offset info for MAC + IPv4 + UDP dummy packet */
232 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
234 { ICE_ETYPE_OL, 12 },
235 { ICE_IPV4_OFOS, 14 },
236 { ICE_UDP_ILOS, 34 },
237 { ICE_PROTOCOL_LAST, 0 },
240 /* Dummy packet for MAC + IPv4 + UDP */
241 static const u8 dummy_udp_packet[] = {
242 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
243 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
246 0x08, 0x00, /* ICE_ETYPE_OL 12 */
248 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
249 0x00, 0x01, 0x00, 0x00,
250 0x00, 0x11, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00,
254 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
255 0x00, 0x08, 0x00, 0x00,
257 0x00, 0x00, /* 2 bytes for 4 byte alignment */
260 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
261 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
263 { ICE_VLAN_OFOS, 12 },
264 { ICE_ETYPE_OL, 16 },
265 { ICE_IPV4_OFOS, 18 },
266 { ICE_UDP_ILOS, 38 },
267 { ICE_PROTOCOL_LAST, 0 },
270 /* C-tag (801.1Q), IPv4:UDP dummy packet */
271 static const u8 dummy_vlan_udp_packet[] = {
272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
278 0x08, 0x00, /* ICE_ETYPE_OL 16 */
280 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
281 0x00, 0x01, 0x00, 0x00,
282 0x00, 0x11, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
287 0x00, 0x08, 0x00, 0x00,
289 0x00, 0x00, /* 2 bytes for 4 byte alignment */
292 /* offset info for MAC + IPv4 + TCP dummy packet */
293 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
295 { ICE_ETYPE_OL, 12 },
296 { ICE_IPV4_OFOS, 14 },
298 { ICE_PROTOCOL_LAST, 0 },
301 /* Dummy packet for MAC + IPv4 + TCP */
302 static const u8 dummy_tcp_packet[] = {
303 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
307 0x08, 0x00, /* ICE_ETYPE_OL 12 */
309 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
310 0x00, 0x01, 0x00, 0x00,
311 0x00, 0x06, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
318 0x50, 0x00, 0x00, 0x00,
319 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, /* 2 bytes for 4 byte alignment */
324 /* offset info for MAC + MPLS dummy packet */
325 static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
327 { ICE_ETYPE_OL, 12 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* Dummy packet for MAC + MPLS */
332 static const u8 dummy_mpls_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x88, 0x47, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x01, 0x00,
340 0x00, 0x00, /* 2 bytes for 4 byte alignment */
343 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
344 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
346 { ICE_VLAN_OFOS, 12 },
347 { ICE_ETYPE_OL, 16 },
348 { ICE_IPV4_OFOS, 18 },
350 { ICE_PROTOCOL_LAST, 0 },
353 /* C-tag (801.1Q), IPv4:TCP dummy packet */
354 static const u8 dummy_vlan_tcp_packet[] = {
355 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
356 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00,
359 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
361 0x08, 0x00, /* ICE_ETYPE_OL 16 */
363 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
364 0x00, 0x01, 0x00, 0x00,
365 0x00, 0x06, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x50, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, /* 2 bytes for 4 byte alignment */
378 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
380 { ICE_ETYPE_OL, 12 },
381 { ICE_IPV6_OFOS, 14 },
383 { ICE_PROTOCOL_LAST, 0 },
386 static const u8 dummy_tcp_ipv6_packet[] = {
387 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
388 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, 0x00, 0x00,
391 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
393 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
394 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
407 0x50, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, /* 2 bytes for 4 byte alignment */
413 /* C-tag (802.1Q): IPv6 + TCP */
414 static const struct ice_dummy_pkt_offsets
415 dummy_vlan_tcp_ipv6_packet_offsets[] = {
417 { ICE_VLAN_OFOS, 12 },
418 { ICE_ETYPE_OL, 16 },
419 { ICE_IPV6_OFOS, 18 },
421 { ICE_PROTOCOL_LAST, 0 },
424 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
425 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
426 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
432 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
434 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
435 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
446 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
448 0x50, 0x00, 0x00, 0x00,
449 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, /* 2 bytes for 4 byte alignment */
455 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
457 { ICE_ETYPE_OL, 12 },
458 { ICE_IPV6_OFOS, 14 },
459 { ICE_UDP_ILOS, 54 },
460 { ICE_PROTOCOL_LAST, 0 },
463 /* IPv6 + UDP dummy packet */
464 static const u8 dummy_udp_ipv6_packet[] = {
465 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
466 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, 0x00, 0x00,
469 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
472 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
483 0x00, 0x10, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
486 0x00, 0x00, 0x00, 0x00,
488 0x00, 0x00, /* 2 bytes for 4 byte alignment */
491 /* C-tag (802.1Q): IPv6 + UDP */
492 static const struct ice_dummy_pkt_offsets
493 dummy_vlan_udp_ipv6_packet_offsets[] = {
495 { ICE_VLAN_OFOS, 12 },
496 { ICE_ETYPE_OL, 16 },
497 { ICE_IPV6_OFOS, 18 },
498 { ICE_UDP_ILOS, 58 },
499 { ICE_PROTOCOL_LAST, 0 },
502 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
503 static const u8 dummy_vlan_udp_ipv6_packet[] = {
504 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
505 0x00, 0x00, 0x00, 0x00,
506 0x00, 0x00, 0x00, 0x00,
508 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
510 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
512 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
513 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
524 0x00, 0x08, 0x00, 0x00,
526 0x00, 0x00, /* 2 bytes for 4 byte alignment */
529 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
530 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
532 { ICE_IPV4_OFOS, 14 },
537 { ICE_PROTOCOL_LAST, 0 },
540 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
541 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
546 0x45, 0x00, 0x00, 0x58, /* IP 14 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x11, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
553 0x00, 0x44, 0x00, 0x00,
555 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x85,
559 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
560 0x00, 0x00, 0x00, 0x00,
562 0x45, 0x00, 0x00, 0x28, /* IP 62 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x06, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x00,
566 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x50, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 byte alignment */
577 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
578 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
580 { ICE_IPV4_OFOS, 14 },
584 { ICE_UDP_ILOS, 82 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
595 0x00, 0x00, 0x00, 0x00,
596 0x00, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
601 0x00, 0x38, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
608 0x00, 0x00, 0x00, 0x00,
610 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
611 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x11, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
617 0x00, 0x08, 0x00, 0x00,
619 0x00, 0x00, /* 2 bytes for 4 byte alignment */
622 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
623 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
625 { ICE_IPV4_OFOS, 14 },
630 { ICE_PROTOCOL_LAST, 0 },
633 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
634 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
639 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x11, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
646 0x00, 0x58, 0x00, 0x00,
648 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
649 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x85,
652 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
653 0x00, 0x00, 0x00, 0x00,
655 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
656 0x00, 0x14, 0x06, 0x00,
657 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x00, 0x00,
659 0x00, 0x00, 0x00, 0x00,
660 0x00, 0x00, 0x00, 0x00,
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00,
669 0x50, 0x00, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, /* 2 bytes for 4 byte alignment */
675 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
677 { ICE_IPV4_OFOS, 14 },
681 { ICE_UDP_ILOS, 102 },
682 { ICE_PROTOCOL_LAST, 0 },
685 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
686 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
691 0x45, 0x00, 0x00, 0x60, /* IP 14 */
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x11, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
698 0x00, 0x4c, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
708 0x00, 0x08, 0x11, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
719 0x00, 0x08, 0x00, 0x00,
721 0x00, 0x00, /* 2 bytes for 4 byte alignment */
724 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
726 { ICE_IPV6_OFOS, 14 },
731 { ICE_PROTOCOL_LAST, 0 },
734 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
735 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
736 0x00, 0x00, 0x00, 0x00,
737 0x00, 0x00, 0x00, 0x00,
740 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
741 0x00, 0x44, 0x11, 0x00,
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
752 0x00, 0x44, 0x00, 0x00,
754 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
755 0x00, 0x00, 0x00, 0x00,
756 0x00, 0x00, 0x00, 0x85,
758 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
759 0x00, 0x00, 0x00, 0x00,
761 0x45, 0x00, 0x00, 0x28, /* IP 82 */
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x06, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x50, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, /* 2 bytes for 4 byte alignment */
776 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
778 { ICE_IPV6_OFOS, 14 },
782 { ICE_UDP_ILOS, 102 },
783 { ICE_PROTOCOL_LAST, 0 },
786 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
787 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
788 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00,
792 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
793 0x00, 0x38, 0x11, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
804 0x00, 0x38, 0x00, 0x00,
806 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x85,
810 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
811 0x00, 0x00, 0x00, 0x00,
813 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x11, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
820 0x00, 0x08, 0x00, 0x00,
822 0x00, 0x00, /* 2 bytes for 4 byte alignment */
825 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
827 { ICE_IPV6_OFOS, 14 },
832 { ICE_PROTOCOL_LAST, 0 },
835 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
836 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
837 0x00, 0x00, 0x00, 0x00,
838 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
842 0x00, 0x58, 0x11, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
853 0x00, 0x58, 0x00, 0x00,
855 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
856 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x00, 0x85,
859 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
860 0x00, 0x00, 0x00, 0x00,
862 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
863 0x00, 0x14, 0x06, 0x00,
864 0x00, 0x00, 0x00, 0x00,
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x50, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, /* 2 bytes for 4 byte alignment */
882 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
884 { ICE_IPV6_OFOS, 14 },
888 { ICE_UDP_ILOS, 122 },
889 { ICE_PROTOCOL_LAST, 0 },
892 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
893 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 0x00, 0x00, 0x00, 0x00,
895 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 0x00, 0x4c, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 0x00, 0x4c, 0x00, 0x00,
912 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
913 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x85,
916 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 0x00, 0x00, 0x00, 0x00,
919 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
920 0x00, 0x08, 0x11, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
928 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
931 0x00, 0x08, 0x00, 0x00,
933 0x00, 0x00, /* 2 bytes for 4 byte alignment */
936 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
938 { ICE_IPV4_OFOS, 14 },
942 { ICE_PROTOCOL_LAST, 0 },
945 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
946 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
947 0x00, 0x00, 0x00, 0x00,
948 0x00, 0x00, 0x00, 0x00,
951 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
952 0x00, 0x00, 0x40, 0x00,
953 0x40, 0x11, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
957 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
958 0x00, 0x00, 0x00, 0x00,
960 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
964 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
965 0x00, 0x00, 0x00, 0x00,
967 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
968 0x00, 0x00, 0x40, 0x00,
969 0x40, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
976 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
978 { ICE_IPV4_OFOS, 14 },
982 { ICE_PROTOCOL_LAST, 0 },
985 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
986 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
987 0x00, 0x00, 0x00, 0x00,
988 0x00, 0x00, 0x00, 0x00,
991 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
992 0x00, 0x00, 0x40, 0x00,
993 0x40, 0x11, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
998 0x00, 0x00, 0x00, 0x00,
1000 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1001 0x00, 0x00, 0x00, 0x00,
1002 0x00, 0x00, 0x00, 0x85,
1004 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1005 0x00, 0x00, 0x00, 0x00,
1007 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1008 0x00, 0x00, 0x3b, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1015 0x00, 0x00, 0x00, 0x00,
1016 0x00, 0x00, 0x00, 0x00,
1022 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1023 { ICE_MAC_OFOS, 0 },
1024 { ICE_IPV6_OFOS, 14 },
1027 { ICE_IPV4_IL, 82 },
1028 { ICE_PROTOCOL_LAST, 0 },
1031 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1032 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1033 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00,
1037 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1038 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1039 0x00, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00,
1046 0x00, 0x00, 0x00, 0x00,
1048 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1049 0x00, 0x00, 0x00, 0x00,
1051 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x85,
1055 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1056 0x00, 0x00, 0x00, 0x00,
1058 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1059 0x00, 0x00, 0x40, 0x00,
1060 0x40, 0x00, 0x00, 0x00,
1061 0x00, 0x00, 0x00, 0x00,
1062 0x00, 0x00, 0x00, 0x00,
1068 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1069 { ICE_MAC_OFOS, 0 },
1070 { ICE_IPV6_OFOS, 14 },
1073 { ICE_IPV6_IL, 82 },
1074 { ICE_PROTOCOL_LAST, 0 },
1077 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1078 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1084 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1094 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1095 0x00, 0x00, 0x00, 0x00,
1097 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x85,
1101 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1102 0x00, 0x00, 0x00, 0x00,
1104 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1105 0x00, 0x00, 0x3b, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x00,
1110 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, 0x00, 0x00,
1118 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1119 { ICE_MAC_OFOS, 0 },
1120 { ICE_IPV4_OFOS, 14 },
1123 { ICE_PROTOCOL_LAST, 0 },
1126 static const u8 dummy_udp_gtp_packet[] = {
1127 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1128 0x00, 0x00, 0x00, 0x00,
1129 0x00, 0x00, 0x00, 0x00,
1132 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x11, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1139 0x00, 0x1c, 0x00, 0x00,
1141 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1142 0x00, 0x00, 0x00, 0x00,
1143 0x00, 0x00, 0x00, 0x85,
1145 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1146 0x00, 0x00, 0x00, 0x00,
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_GTP_NO_PAY, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1159 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1160 { ICE_MAC_OFOS, 0 },
1161 { ICE_IPV6_OFOS, 14 },
1163 { ICE_GTP_NO_PAY, 62 },
1164 { ICE_PROTOCOL_LAST, 0 },
1167 static const u8 dummy_ipv6_gtp_packet[] = {
1168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1169 0x00, 0x00, 0x00, 0x00,
1170 0x00, 0x00, 0x00, 0x00,
1173 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1174 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1175 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00,
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, 0x00, 0x00,
1184 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1185 0x00, 0x00, 0x00, 0x00,
1187 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1188 0x00, 0x00, 0x00, 0x00,
1193 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1194 { ICE_MAC_OFOS, 0 },
1195 { ICE_VLAN_OFOS, 12 },
1196 { ICE_ETYPE_OL, 16 },
1198 { ICE_PROTOCOL_LAST, 0 },
1201 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1202 { ICE_MAC_OFOS, 0 },
1203 { ICE_VLAN_OFOS, 12 },
1204 { ICE_ETYPE_OL, 16 },
1206 { ICE_IPV4_OFOS, 26 },
1207 { ICE_PROTOCOL_LAST, 0 },
1210 static const u8 dummy_pppoe_ipv4_packet[] = {
1211 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1212 0x00, 0x00, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1215 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1217 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1219 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1222 0x00, 0x21, /* PPP Link Layer 24 */
1224 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1227 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00,
1230 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1234 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_OFOS, 12 },
1237 { ICE_ETYPE_OL, 16 },
1239 { ICE_IPV4_OFOS, 26 },
1241 { ICE_PROTOCOL_LAST, 0 },
1244 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1245 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1246 0x00, 0x00, 0x00, 0x00,
1247 0x00, 0x00, 0x00, 0x00,
1249 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1251 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1253 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1256 0x00, 0x21, /* PPP Link Layer 24 */
1258 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1259 0x00, 0x01, 0x00, 0x00,
1260 0x00, 0x06, 0x00, 0x00,
1261 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00,
1264 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1265 0x00, 0x00, 0x00, 0x00,
1266 0x00, 0x00, 0x00, 0x00,
1267 0x50, 0x00, 0x00, 0x00,
1268 0x00, 0x00, 0x00, 0x00,
1270 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1274 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1275 { ICE_MAC_OFOS, 0 },
1276 { ICE_VLAN_OFOS, 12 },
1277 { ICE_ETYPE_OL, 16 },
1279 { ICE_IPV4_OFOS, 26 },
1280 { ICE_UDP_ILOS, 46 },
1281 { ICE_PROTOCOL_LAST, 0 },
1284 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1285 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1286 0x00, 0x00, 0x00, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1289 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1291 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1293 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1296 0x00, 0x21, /* PPP Link Layer 24 */
1298 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1299 0x00, 0x01, 0x00, 0x00,
1300 0x00, 0x11, 0x00, 0x00,
1301 0x00, 0x00, 0x00, 0x00,
1302 0x00, 0x00, 0x00, 0x00,
1304 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1305 0x00, 0x08, 0x00, 0x00,
1307 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1310 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1311 { ICE_MAC_OFOS, 0 },
1312 { ICE_VLAN_OFOS, 12 },
1313 { ICE_ETYPE_OL, 16 },
1315 { ICE_IPV6_OFOS, 26 },
1316 { ICE_PROTOCOL_LAST, 0 },
1319 static const u8 dummy_pppoe_ipv6_packet[] = {
1320 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1321 0x00, 0x00, 0x00, 0x00,
1322 0x00, 0x00, 0x00, 0x00,
1324 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1326 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1328 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1331 0x00, 0x57, /* PPP Link Layer 24 */
1333 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1334 0x00, 0x00, 0x3b, 0x00,
1335 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x00, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1348 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1349 { ICE_MAC_OFOS, 0 },
1350 { ICE_VLAN_OFOS, 12 },
1351 { ICE_ETYPE_OL, 16 },
1353 { ICE_IPV6_OFOS, 26 },
1355 { ICE_PROTOCOL_LAST, 0 },
1358 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1359 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1360 0x00, 0x00, 0x00, 0x00,
1361 0x00, 0x00, 0x00, 0x00,
1363 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1365 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1367 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1370 0x00, 0x57, /* PPP Link Layer 24 */
1372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00,
1381 0x00, 0x00, 0x00, 0x00,
1383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1384 0x00, 0x00, 0x00, 0x00,
1385 0x00, 0x00, 0x00, 0x00,
1386 0x50, 0x00, 0x00, 0x00,
1387 0x00, 0x00, 0x00, 0x00,
1389 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1393 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1394 { ICE_MAC_OFOS, 0 },
1395 { ICE_VLAN_OFOS, 12 },
1396 { ICE_ETYPE_OL, 16 },
1398 { ICE_IPV6_OFOS, 26 },
1399 { ICE_UDP_ILOS, 66 },
1400 { ICE_PROTOCOL_LAST, 0 },
1403 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1405 0x00, 0x00, 0x00, 0x00,
1406 0x00, 0x00, 0x00, 0x00,
1408 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1410 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1412 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1415 0x00, 0x57, /* PPP Link Layer 24 */
1417 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1418 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1421 0x00, 0x00, 0x00, 0x00,
1422 0x00, 0x00, 0x00, 0x00,
1423 0x00, 0x00, 0x00, 0x00,
1424 0x00, 0x00, 0x00, 0x00,
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1429 0x00, 0x08, 0x00, 0x00,
1431 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1434 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1435 { ICE_MAC_OFOS, 0 },
1436 { ICE_IPV4_OFOS, 14 },
1438 { ICE_PROTOCOL_LAST, 0 },
1441 static const u8 dummy_ipv4_esp_pkt[] = {
1442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1443 0x00, 0x00, 0x00, 0x00,
1444 0x00, 0x00, 0x00, 0x00,
1447 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1448 0x00, 0x00, 0x40, 0x00,
1449 0x40, 0x32, 0x00, 0x00,
1450 0x00, 0x00, 0x00, 0x00,
1451 0x00, 0x00, 0x00, 0x00,
1453 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1454 0x00, 0x00, 0x00, 0x00,
1455 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1458 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1459 { ICE_MAC_OFOS, 0 },
1460 { ICE_IPV6_OFOS, 14 },
1462 { ICE_PROTOCOL_LAST, 0 },
1465 static const u8 dummy_ipv6_esp_pkt[] = {
1466 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1467 0x00, 0x00, 0x00, 0x00,
1468 0x00, 0x00, 0x00, 0x00,
1471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1472 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00,
1477 0x00, 0x00, 0x00, 0x00,
1478 0x00, 0x00, 0x00, 0x00,
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1487 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1488 { ICE_MAC_OFOS, 0 },
1489 { ICE_IPV4_OFOS, 14 },
1491 { ICE_PROTOCOL_LAST, 0 },
1494 static const u8 dummy_ipv4_ah_pkt[] = {
1495 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1496 0x00, 0x00, 0x00, 0x00,
1497 0x00, 0x00, 0x00, 0x00,
1500 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1501 0x00, 0x00, 0x40, 0x00,
1502 0x40, 0x33, 0x00, 0x00,
1503 0x00, 0x00, 0x00, 0x00,
1504 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1507 0x00, 0x00, 0x00, 0x00,
1508 0x00, 0x00, 0x00, 0x00,
1509 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1512 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1513 { ICE_MAC_OFOS, 0 },
1514 { ICE_IPV6_OFOS, 14 },
1516 { ICE_PROTOCOL_LAST, 0 },
1519 static const u8 dummy_ipv6_ah_pkt[] = {
1520 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1521 0x00, 0x00, 0x00, 0x00,
1522 0x00, 0x00, 0x00, 0x00,
1525 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1526 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1527 0x00, 0x00, 0x00, 0x00,
1528 0x00, 0x00, 0x00, 0x00,
1529 0x00, 0x00, 0x00, 0x00,
1530 0x00, 0x00, 0x00, 0x00,
1531 0x00, 0x00, 0x00, 0x00,
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1534 0x00, 0x00, 0x00, 0x00,
1536 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1537 0x00, 0x00, 0x00, 0x00,
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1542 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1543 { ICE_MAC_OFOS, 0 },
1544 { ICE_IPV4_OFOS, 14 },
1545 { ICE_UDP_ILOS, 34 },
1547 { ICE_PROTOCOL_LAST, 0 },
1550 static const u8 dummy_ipv4_nat_pkt[] = {
1551 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, 0x00, 0x00,
1556 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1557 0x00, 0x00, 0x40, 0x00,
1558 0x40, 0x11, 0x00, 0x00,
1559 0x00, 0x00, 0x00, 0x00,
1560 0x00, 0x00, 0x00, 0x00,
1562 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1563 0x00, 0x00, 0x00, 0x00,
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1567 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1570 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1571 { ICE_MAC_OFOS, 0 },
1572 { ICE_IPV6_OFOS, 14 },
1573 { ICE_UDP_ILOS, 54 },
1575 { ICE_PROTOCOL_LAST, 0 },
1578 static const u8 dummy_ipv6_nat_pkt[] = {
1579 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1580 0x00, 0x00, 0x00, 0x00,
1581 0x00, 0x00, 0x00, 0x00,
1584 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1585 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1586 0x00, 0x00, 0x00, 0x00,
1587 0x00, 0x00, 0x00, 0x00,
1588 0x00, 0x00, 0x00, 0x00,
1589 0x00, 0x00, 0x00, 0x00,
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1592 0x00, 0x00, 0x00, 0x00,
1593 0x00, 0x00, 0x00, 0x00,
1595 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1596 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1604 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1605 { ICE_MAC_OFOS, 0 },
1606 { ICE_IPV4_OFOS, 14 },
1608 { ICE_PROTOCOL_LAST, 0 },
1611 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1612 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1613 0x00, 0x00, 0x00, 0x00,
1614 0x00, 0x00, 0x00, 0x00,
1617 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1618 0x00, 0x00, 0x40, 0x00,
1619 0x40, 0x73, 0x00, 0x00,
1620 0x00, 0x00, 0x00, 0x00,
1621 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1624 0x00, 0x00, 0x00, 0x00,
1625 0x00, 0x00, 0x00, 0x00,
1626 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1629 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1630 { ICE_MAC_OFOS, 0 },
1631 { ICE_IPV6_OFOS, 14 },
1633 { ICE_PROTOCOL_LAST, 0 },
1636 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1637 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1638 0x00, 0x00, 0x00, 0x00,
1639 0x00, 0x00, 0x00, 0x00,
1642 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1643 0x00, 0x0c, 0x73, 0x40,
1644 0x00, 0x00, 0x00, 0x00,
1645 0x00, 0x00, 0x00, 0x00,
1646 0x00, 0x00, 0x00, 0x00,
1647 0x00, 0x00, 0x00, 0x00,
1648 0x00, 0x00, 0x00, 0x00,
1649 0x00, 0x00, 0x00, 0x00,
1650 0x00, 0x00, 0x00, 0x00,
1651 0x00, 0x00, 0x00, 0x00,
1653 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1654 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1659 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1660 { ICE_MAC_OFOS, 0 },
1661 { ICE_VLAN_EX, 12 },
1662 { ICE_VLAN_IN, 16 },
1663 { ICE_ETYPE_OL, 20 },
1664 { ICE_IPV4_OFOS, 22 },
1665 { ICE_PROTOCOL_LAST, 0 },
1668 static const u8 dummy_qinq_ipv4_pkt[] = {
1669 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1670 0x00, 0x00, 0x00, 0x00,
1671 0x00, 0x00, 0x00, 0x00,
1673 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1674 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1675 0x08, 0x00, /* ICE_ETYPE_OL 20 */
1677 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1678 0x00, 0x01, 0x00, 0x00,
1679 0x00, 0x11, 0x00, 0x00,
1680 0x00, 0x00, 0x00, 0x00,
1681 0x00, 0x00, 0x00, 0x00,
1683 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1684 0x00, 0x08, 0x00, 0x00,
1686 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1689 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1690 { ICE_MAC_OFOS, 0 },
1691 { ICE_VLAN_EX, 12 },
1692 { ICE_VLAN_IN, 16 },
1693 { ICE_ETYPE_OL, 20 },
1694 { ICE_IPV6_OFOS, 22 },
1695 { ICE_PROTOCOL_LAST, 0 },
1698 static const u8 dummy_qinq_ipv6_pkt[] = {
1699 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1700 0x00, 0x00, 0x00, 0x00,
1701 0x00, 0x00, 0x00, 0x00,
1703 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1704 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1705 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
1707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1708 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1709 0x00, 0x00, 0x00, 0x00,
1710 0x00, 0x00, 0x00, 0x00,
1711 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00,
1713 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00,
1715 0x00, 0x00, 0x00, 0x00,
1716 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1719 0x00, 0x10, 0x00, 0x00,
1721 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1722 0x00, 0x00, 0x00, 0x00,
1724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1727 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1728 { ICE_MAC_OFOS, 0 },
1729 { ICE_VLAN_EX, 12 },
1730 { ICE_VLAN_IN, 16 },
1731 { ICE_ETYPE_OL, 20 },
1733 { ICE_PROTOCOL_LAST, 0 },
1737 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1738 { ICE_MAC_OFOS, 0 },
1739 { ICE_VLAN_EX, 12 },
1740 { ICE_VLAN_IN, 16 },
1741 { ICE_ETYPE_OL, 20 },
1743 { ICE_IPV4_OFOS, 30 },
1744 { ICE_PROTOCOL_LAST, 0 },
1747 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1749 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00,
1752 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1753 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1754 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1759 0x00, 0x21, /* PPP Link Layer 28 */
1761 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
1762 0x00, 0x00, 0x00, 0x00,
1763 0x00, 0x00, 0x00, 0x00,
1764 0x00, 0x00, 0x00, 0x00,
1765 0x00, 0x00, 0x00, 0x00,
1767 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1771 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1772 { ICE_MAC_OFOS, 0 },
1773 { ICE_VLAN_EX, 12 },
1774 { ICE_VLAN_IN, 16 },
1775 { ICE_ETYPE_OL, 20 },
1777 { ICE_IPV6_OFOS, 30 },
1778 { ICE_PROTOCOL_LAST, 0 },
1781 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1782 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1783 0x00, 0x00, 0x00, 0x00,
1784 0x00, 0x00, 0x00, 0x00,
1786 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1787 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1788 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1790 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1793 0x00, 0x57, /* PPP Link Layer 28*/
1795 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1796 0x00, 0x00, 0x3b, 0x00,
1797 0x00, 0x00, 0x00, 0x00,
1798 0x00, 0x00, 0x00, 0x00,
1799 0x00, 0x00, 0x00, 0x00,
1800 0x00, 0x00, 0x00, 0x00,
1801 0x00, 0x00, 0x00, 0x00,
1802 0x00, 0x00, 0x00, 0x00,
1803 0x00, 0x00, 0x00, 0x00,
1804 0x00, 0x00, 0x00, 0x00,
1806 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1809 /* this is a recipe to profile association bitmap */
1810 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1811 ICE_MAX_NUM_PROFILES);
1813 /* this is a profile to recipe association bitmap */
1814 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1815 ICE_MAX_NUM_RECIPES);
1817 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1820 * ice_collect_result_idx - copy result index values
1821 * @buf: buffer that contains the result index
1822 * @recp: the recipe struct to copy data into
1824 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1825 struct ice_sw_recipe *recp)
1827 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1828 ice_set_bit(buf->content.result_indx &
1829 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1832 static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
1833 { ICE_PROFID_IPV4_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV4},
1834 { ICE_PROFID_IPV4_GTPU_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
1835 { ICE_PROFID_IPV4_GTPU_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
1836 { ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
1837 { ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
1838 { ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
1839 { ICE_PROFID_IPV4_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV6},
1840 { ICE_PROFID_IPV4_GTPU_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
1841 { ICE_PROFID_IPV4_GTPU_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
1842 { ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
1843 { ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
1844 { ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
1845 { ICE_PROFID_IPV6_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV4},
1846 { ICE_PROFID_IPV6_GTPU_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
1847 { ICE_PROFID_IPV6_GTPU_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
1848 { ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
1849 { ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
1850 { ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
1851 { ICE_PROFID_IPV6_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV6},
1852 { ICE_PROFID_IPV6_GTPU_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
1853 { ICE_PROFID_IPV6_GTPU_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
1854 { ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
1855 { ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
1856 { ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
1860 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1861 * @rid: recipe ID that we are populating
1863 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1865 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1866 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1867 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1868 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1869 enum ice_sw_tunnel_type tun_type;
1870 u16 i, j, k, profile_num = 0;
1871 bool non_tun_valid = false;
1872 bool pppoe_valid = false;
1873 bool vxlan_valid = false;
1874 bool gre_valid = false;
1875 bool gtp_valid = false;
1876 bool flag_valid = false;
1878 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1879 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1884 for (i = 0; i < 12; i++) {
1885 if (gre_profile[i] == j)
1889 for (i = 0; i < 12; i++) {
1890 if (vxlan_profile[i] == j)
1894 for (i = 0; i < 7; i++) {
1895 if (pppoe_profile[i] == j)
1899 for (i = 0; i < 6; i++) {
1900 if (non_tun_profile[i] == j)
1901 non_tun_valid = true;
1904 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1905 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1908 if ((j >= ICE_PROFID_IPV4_ESP &&
1909 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1910 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1911 j <= ICE_PROFID_IPV6_GTPU_TEID))
1915 if (!non_tun_valid && vxlan_valid)
1916 tun_type = ICE_SW_TUN_VXLAN;
1917 else if (!non_tun_valid && gre_valid)
1918 tun_type = ICE_SW_TUN_NVGRE;
1919 else if (!non_tun_valid && pppoe_valid)
1920 tun_type = ICE_SW_TUN_PPPOE;
1921 else if (!non_tun_valid && gtp_valid)
1922 tun_type = ICE_SW_TUN_GTP;
1923 else if (non_tun_valid &&
1924 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1925 tun_type = ICE_SW_TUN_AND_NON_TUN;
1926 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1928 tun_type = ICE_NON_TUN;
1930 tun_type = ICE_NON_TUN;
1932 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1933 i = ice_is_bit_set(recipe_to_profile[rid],
1934 ICE_PROFID_PPPOE_IPV4_OTHER);
1935 j = ice_is_bit_set(recipe_to_profile[rid],
1936 ICE_PROFID_PPPOE_IPV6_OTHER);
1938 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1940 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1943 if (tun_type == ICE_SW_TUN_GTP) {
1944 for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
1945 if (ice_is_bit_set(recipe_to_profile[rid],
1946 ice_prof_type_tbl[k].prof_id)) {
1947 tun_type = ice_prof_type_tbl[k].type;
1952 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1953 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1954 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1956 case ICE_PROFID_IPV4_TCP:
1957 tun_type = ICE_SW_IPV4_TCP;
1959 case ICE_PROFID_IPV4_UDP:
1960 tun_type = ICE_SW_IPV4_UDP;
1962 case ICE_PROFID_IPV6_TCP:
1963 tun_type = ICE_SW_IPV6_TCP;
1965 case ICE_PROFID_IPV6_UDP:
1966 tun_type = ICE_SW_IPV6_UDP;
1968 case ICE_PROFID_PPPOE_PAY:
1969 tun_type = ICE_SW_TUN_PPPOE_PAY;
1971 case ICE_PROFID_PPPOE_IPV4_TCP:
1972 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1974 case ICE_PROFID_PPPOE_IPV4_UDP:
1975 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1977 case ICE_PROFID_PPPOE_IPV4_OTHER:
1978 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1980 case ICE_PROFID_PPPOE_IPV6_TCP:
1981 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1983 case ICE_PROFID_PPPOE_IPV6_UDP:
1984 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1986 case ICE_PROFID_PPPOE_IPV6_OTHER:
1987 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1989 case ICE_PROFID_IPV4_ESP:
1990 tun_type = ICE_SW_TUN_IPV4_ESP;
1992 case ICE_PROFID_IPV6_ESP:
1993 tun_type = ICE_SW_TUN_IPV6_ESP;
1995 case ICE_PROFID_IPV4_AH:
1996 tun_type = ICE_SW_TUN_IPV4_AH;
1998 case ICE_PROFID_IPV6_AH:
1999 tun_type = ICE_SW_TUN_IPV6_AH;
2001 case ICE_PROFID_IPV4_NAT_T:
2002 tun_type = ICE_SW_TUN_IPV4_NAT_T;
2004 case ICE_PROFID_IPV6_NAT_T:
2005 tun_type = ICE_SW_TUN_IPV6_NAT_T;
2007 case ICE_PROFID_IPV4_PFCP_NODE:
2009 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
2011 case ICE_PROFID_IPV6_PFCP_NODE:
2013 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
2015 case ICE_PROFID_IPV4_PFCP_SESSION:
2017 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2019 case ICE_PROFID_IPV6_PFCP_SESSION:
2021 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2023 case ICE_PROFID_MAC_IPV4_L2TPV3:
2024 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2026 case ICE_PROFID_MAC_IPV6_L2TPV3:
2027 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2029 case ICE_PROFID_IPV4_GTPU_TEID:
2030 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2032 case ICE_PROFID_IPV6_GTPU_TEID:
2033 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2044 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2045 tun_type = ICE_SW_TUN_PPPOE_QINQ;
2046 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2047 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2048 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2049 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2050 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2051 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2052 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2053 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2054 else if (vlan && tun_type == ICE_NON_TUN)
2055 tun_type = ICE_NON_TUN_QINQ;
2061 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2062 * @hw: pointer to hardware structure
2063 * @recps: struct that we need to populate
2064 * @rid: recipe ID that we are populating
2065 * @refresh_required: true if we should get recipe to profile mapping from FW
2067 * This function is used to populate all the necessary entries into our
2068 * bookkeeping so that we have a current list of all the recipes that are
2069 * programmed in the firmware.
2071 static enum ice_status
2072 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2073 bool *refresh_required)
2075 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2076 struct ice_aqc_recipe_data_elem *tmp;
2077 u16 num_recps = ICE_MAX_NUM_RECIPES;
2078 struct ice_prot_lkup_ext *lkup_exts;
2079 enum ice_status status;
2084 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2086 /* we need a buffer big enough to accommodate all the recipes */
2087 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2088 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2090 return ICE_ERR_NO_MEMORY;
2092 tmp[0].recipe_indx = rid;
2093 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2094 /* non-zero status meaning recipe doesn't exist */
2098 /* Get recipe to profile map so that we can get the fv from lkups that
2099 * we read for a recipe from FW. Since we want to minimize the number of
2100 * times we make this FW call, just make one call and cache the copy
2101 * until a new recipe is added. This operation is only required the
2102 * first time to get the changes from FW. Then to search existing
2103 * entries we don't need to update the cache again until another recipe
2106 if (*refresh_required) {
2107 ice_get_recp_to_prof_map(hw);
2108 *refresh_required = false;
2111 /* Start populating all the entries for recps[rid] based on lkups from
2112 * firmware. Note that we are only creating the root recipe in our
2115 lkup_exts = &recps[rid].lkup_exts;
2117 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2118 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2119 struct ice_recp_grp_entry *rg_entry;
2120 u8 i, prof, idx, prot = 0;
2124 rg_entry = (struct ice_recp_grp_entry *)
2125 ice_malloc(hw, sizeof(*rg_entry));
2127 status = ICE_ERR_NO_MEMORY;
2131 idx = root_bufs.recipe_indx;
2132 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2134 /* Mark all result indices in this chain */
2135 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2136 ice_set_bit(root_bufs.content.result_indx &
2137 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2139 /* get the first profile that is associated with rid */
2140 prof = ice_find_first_bit(recipe_to_profile[idx],
2141 ICE_MAX_NUM_PROFILES);
2142 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2143 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2145 rg_entry->fv_idx[i] = lkup_indx;
2146 rg_entry->fv_mask[i] =
2147 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2149 /* If the recipe is a chained recipe then all its
2150 * child recipe's result will have a result index.
2151 * To fill fv_words we should not use those result
2152 * index, we only need the protocol ids and offsets.
2153 * We will skip all the fv_idx which stores result
2154 * index in them. We also need to skip any fv_idx which
2155 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2156 * valid offset value.
2158 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2159 rg_entry->fv_idx[i]) ||
2160 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2161 rg_entry->fv_idx[i] == 0)
2164 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2165 rg_entry->fv_idx[i], &prot, &off);
2166 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2167 lkup_exts->fv_words[fv_word_idx].off = off;
2168 lkup_exts->field_mask[fv_word_idx] =
2169 rg_entry->fv_mask[i];
2170 if (prot == ICE_META_DATA_ID_HW &&
2171 off == ICE_TUN_FLAG_MDID_OFF)
2175 /* populate rg_list with the data from the child entry of this
2178 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2180 /* Propagate some data to the recipe database */
2181 recps[idx].is_root = !!is_root;
2182 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2183 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2184 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2185 recps[idx].chain_idx = root_bufs.content.result_indx &
2186 ~ICE_AQ_RECIPE_RESULT_EN;
2187 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2189 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2195 /* Only do the following for root recipes entries */
2196 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2197 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2198 recps[idx].root_rid = root_bufs.content.rid &
2199 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2200 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2203 /* Complete initialization of the root recipe entry */
2204 lkup_exts->n_val_words = fv_word_idx;
2205 recps[rid].big_recp = (num_recps > 1);
2206 recps[rid].n_grp_count = (u8)num_recps;
2207 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2208 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2209 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2210 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2211 if (!recps[rid].root_buf)
2214 /* Copy result indexes */
2215 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2216 recps[rid].recp_created = true;
2224 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2225 * @hw: pointer to hardware structure
2227 * This function is used to populate recipe_to_profile matrix where index to
2228 * this array is the recipe ID and the element is the mapping of which profiles
2229 * is this recipe mapped to.
2231 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2233 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2236 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2239 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2240 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2241 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2243 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2244 ICE_MAX_NUM_RECIPES);
2245 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2246 ice_set_bit(i, recipe_to_profile[j]);
2251 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2252 * @hw: pointer to the HW struct
2253 * @recp_list: pointer to sw recipe list
2255 * Allocate memory for the entire recipe table and initialize the structures/
2256 * entries corresponding to basic recipes.
2259 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2261 struct ice_sw_recipe *recps;
2264 recps = (struct ice_sw_recipe *)
2265 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2267 return ICE_ERR_NO_MEMORY;
2269 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2270 recps[i].root_rid = i;
2271 INIT_LIST_HEAD(&recps[i].filt_rules);
2272 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2273 INIT_LIST_HEAD(&recps[i].rg_list);
2274 ice_init_lock(&recps[i].filt_rule_lock);
2283 * ice_aq_get_sw_cfg - get switch configuration
2284 * @hw: pointer to the hardware structure
2285 * @buf: pointer to the result buffer
2286 * @buf_size: length of the buffer available for response
2287 * @req_desc: pointer to requested descriptor
2288 * @num_elems: pointer to number of elements
2289 * @cd: pointer to command details structure or NULL
2291 * Get switch configuration (0x0200) to be placed in buf.
2292 * This admin command returns information such as initial VSI/port number
2293 * and switch ID it belongs to.
2295 * NOTE: *req_desc is both an input/output parameter.
2296 * The caller of this function first calls this function with *request_desc set
2297 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2298 * configuration information has been returned; if non-zero (meaning not all
2299 * the information was returned), the caller should call this function again
2300 * with *req_desc set to the previous value returned by f/w to get the
2301 * next block of switch configuration information.
2303 * *num_elems is output only parameter. This reflects the number of elements
2304 * in response buffer. The caller of this function to use *num_elems while
2305 * parsing the response buffer.
2307 static enum ice_status
2308 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2309 u16 buf_size, u16 *req_desc, u16 *num_elems,
2310 struct ice_sq_cd *cd)
2312 struct ice_aqc_get_sw_cfg *cmd;
2313 struct ice_aq_desc desc;
2314 enum ice_status status;
2316 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2317 cmd = &desc.params.get_sw_conf;
2318 cmd->element = CPU_TO_LE16(*req_desc);
2320 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2322 *req_desc = LE16_TO_CPU(cmd->element);
2323 *num_elems = LE16_TO_CPU(cmd->num_elems);
2330 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2331 * @hw: pointer to the HW struct
2332 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2333 * @global_lut_id: output parameter for the RSS global LUT's ID
2335 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2337 struct ice_aqc_alloc_free_res_elem *sw_buf;
2338 enum ice_status status;
2341 buf_len = ice_struct_size(sw_buf, elem, 1);
2342 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2344 return ICE_ERR_NO_MEMORY;
2346 sw_buf->num_elems = CPU_TO_LE16(1);
2347 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2348 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2349 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2351 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2353 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2354 shared_res ? "shared" : "dedicated", status);
2355 goto ice_alloc_global_lut_exit;
2358 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2360 ice_alloc_global_lut_exit:
2361 ice_free(hw, sw_buf);
2366 * ice_free_rss_global_lut - free a RSS global LUT
2367 * @hw: pointer to the HW struct
2368 * @global_lut_id: ID of the RSS global LUT to free
2370 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2372 struct ice_aqc_alloc_free_res_elem *sw_buf;
2373 u16 buf_len, num_elems = 1;
2374 enum ice_status status;
2376 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2377 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2379 return ICE_ERR_NO_MEMORY;
2381 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2382 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2383 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2385 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2387 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2388 global_lut_id, status);
2390 ice_free(hw, sw_buf);
2395 * ice_alloc_sw - allocate resources specific to switch
2396 * @hw: pointer to the HW struct
2397 * @ena_stats: true to turn on VEB stats
2398 * @shared_res: true for shared resource, false for dedicated resource
2399 * @sw_id: switch ID returned
2400 * @counter_id: VEB counter ID returned
2402 * allocates switch resources (SWID and VEB counter) (0x0208)
2405 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2408 struct ice_aqc_alloc_free_res_elem *sw_buf;
2409 struct ice_aqc_res_elem *sw_ele;
2410 enum ice_status status;
2413 buf_len = ice_struct_size(sw_buf, elem, 1);
2414 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2416 return ICE_ERR_NO_MEMORY;
2418 /* Prepare buffer for switch ID.
2419 * The number of resource entries in buffer is passed as 1 since only a
2420 * single switch/VEB instance is allocated, and hence a single sw_id
2423 sw_buf->num_elems = CPU_TO_LE16(1);
2425 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2426 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2427 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2429 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2430 ice_aqc_opc_alloc_res, NULL);
2433 goto ice_alloc_sw_exit;
2435 sw_ele = &sw_buf->elem[0];
2436 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2439 /* Prepare buffer for VEB Counter */
2440 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2441 struct ice_aqc_alloc_free_res_elem *counter_buf;
2442 struct ice_aqc_res_elem *counter_ele;
2444 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2445 ice_malloc(hw, buf_len);
2447 status = ICE_ERR_NO_MEMORY;
2448 goto ice_alloc_sw_exit;
2451 /* The number of resource entries in buffer is passed as 1 since
2452 * only a single switch/VEB instance is allocated, and hence a
2453 * single VEB counter is requested.
2455 counter_buf->num_elems = CPU_TO_LE16(1);
2456 counter_buf->res_type =
2457 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2458 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2459 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2463 ice_free(hw, counter_buf);
2464 goto ice_alloc_sw_exit;
2466 counter_ele = &counter_buf->elem[0];
2467 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2468 ice_free(hw, counter_buf);
2472 ice_free(hw, sw_buf);
2477 * ice_free_sw - free resources specific to switch
2478 * @hw: pointer to the HW struct
2479 * @sw_id: switch ID returned
2480 * @counter_id: VEB counter ID returned
2482 * free switch resources (SWID and VEB counter) (0x0209)
2484 * NOTE: This function frees multiple resources. It continues
2485 * releasing other resources even after it encounters error.
2486 * The error code returned is the last error it encountered.
2488 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2490 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2491 enum ice_status status, ret_status;
2494 buf_len = ice_struct_size(sw_buf, elem, 1);
2495 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2497 return ICE_ERR_NO_MEMORY;
2499 /* Prepare buffer to free for switch ID res.
2500 * The number of resource entries in buffer is passed as 1 since only a
2501 * single switch/VEB instance is freed, and hence a single sw_id
2504 sw_buf->num_elems = CPU_TO_LE16(1);
2505 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2506 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2508 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2509 ice_aqc_opc_free_res, NULL);
2512 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2514 /* Prepare buffer to free for VEB Counter resource */
2515 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2516 ice_malloc(hw, buf_len);
2518 ice_free(hw, sw_buf);
2519 return ICE_ERR_NO_MEMORY;
2522 /* The number of resource entries in buffer is passed as 1 since only a
2523 * single switch/VEB instance is freed, and hence a single VEB counter
2526 counter_buf->num_elems = CPU_TO_LE16(1);
2527 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2528 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2530 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2531 ice_aqc_opc_free_res, NULL);
2533 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2534 ret_status = status;
2537 ice_free(hw, counter_buf);
2538 ice_free(hw, sw_buf);
2544 * @hw: pointer to the HW struct
2545 * @vsi_ctx: pointer to a VSI context struct
2546 * @cd: pointer to command details structure or NULL
2548 * Add a VSI context to the hardware (0x0210)
2551 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2552 struct ice_sq_cd *cd)
2554 struct ice_aqc_add_update_free_vsi_resp *res;
2555 struct ice_aqc_add_get_update_free_vsi *cmd;
2556 struct ice_aq_desc desc;
2557 enum ice_status status;
2559 cmd = &desc.params.vsi_cmd;
2560 res = &desc.params.add_update_free_vsi_res;
2562 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2564 if (!vsi_ctx->alloc_from_pool)
2565 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2566 ICE_AQ_VSI_IS_VALID);
2568 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2570 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2572 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2573 sizeof(vsi_ctx->info), cd);
2576 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2577 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2578 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2586 * @hw: pointer to the HW struct
2587 * @vsi_ctx: pointer to a VSI context struct
2588 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2589 * @cd: pointer to command details structure or NULL
2591 * Free VSI context info from hardware (0x0213)
2594 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2595 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2597 struct ice_aqc_add_update_free_vsi_resp *resp;
2598 struct ice_aqc_add_get_update_free_vsi *cmd;
2599 struct ice_aq_desc desc;
2600 enum ice_status status;
2602 cmd = &desc.params.vsi_cmd;
2603 resp = &desc.params.add_update_free_vsi_res;
2605 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2607 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2609 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2611 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2613 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2614 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2622 * @hw: pointer to the HW struct
2623 * @vsi_ctx: pointer to a VSI context struct
2624 * @cd: pointer to command details structure or NULL
2626 * Update VSI context in the hardware (0x0211)
2629 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2630 struct ice_sq_cd *cd)
2632 struct ice_aqc_add_update_free_vsi_resp *resp;
2633 struct ice_aqc_add_get_update_free_vsi *cmd;
2634 struct ice_aq_desc desc;
2635 enum ice_status status;
2637 cmd = &desc.params.vsi_cmd;
2638 resp = &desc.params.add_update_free_vsi_res;
2640 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2642 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2644 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2646 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2647 sizeof(vsi_ctx->info), cd);
2650 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2651 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2658 * ice_is_vsi_valid - check whether the VSI is valid or not
2659 * @hw: pointer to the HW struct
2660 * @vsi_handle: VSI handle
2662 * check whether the VSI is valid or not
2664 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2666 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2670 * ice_get_hw_vsi_num - return the HW VSI number
2671 * @hw: pointer to the HW struct
2672 * @vsi_handle: VSI handle
2674 * return the HW VSI number
2675 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2677 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2679 return hw->vsi_ctx[vsi_handle]->vsi_num;
2683 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2684 * @hw: pointer to the HW struct
2685 * @vsi_handle: VSI handle
2687 * return the VSI context entry for a given VSI handle
2689 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2691 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2695 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2696 * @hw: pointer to the HW struct
2697 * @vsi_handle: VSI handle
2698 * @vsi: VSI context pointer
2700 * save the VSI context entry for a given VSI handle
2703 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2705 hw->vsi_ctx[vsi_handle] = vsi;
2709 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2710 * @hw: pointer to the HW struct
2711 * @vsi_handle: VSI handle
2713 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2715 struct ice_vsi_ctx *vsi;
2718 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2721 ice_for_each_traffic_class(i) {
2722 if (vsi->lan_q_ctx[i]) {
2723 ice_free(hw, vsi->lan_q_ctx[i]);
2724 vsi->lan_q_ctx[i] = NULL;
2730 * ice_clear_vsi_ctx - clear the VSI context entry
2731 * @hw: pointer to the HW struct
2732 * @vsi_handle: VSI handle
2734 * clear the VSI context entry
2736 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2738 struct ice_vsi_ctx *vsi;
2740 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2742 ice_clear_vsi_q_ctx(hw, vsi_handle);
2744 hw->vsi_ctx[vsi_handle] = NULL;
2749 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2750 * @hw: pointer to the HW struct
2752 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2756 for (i = 0; i < ICE_MAX_VSI; i++)
2757 ice_clear_vsi_ctx(hw, i);
2761 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2762 * @hw: pointer to the HW struct
2763 * @vsi_handle: unique VSI handle provided by drivers
2764 * @vsi_ctx: pointer to a VSI context struct
2765 * @cd: pointer to command details structure or NULL
2767 * Add a VSI context to the hardware also add it into the VSI handle list.
2768 * If this function gets called after reset for existing VSIs then update
2769 * with the new HW VSI number in the corresponding VSI handle list entry.
2772 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2773 struct ice_sq_cd *cd)
2775 struct ice_vsi_ctx *tmp_vsi_ctx;
2776 enum ice_status status;
2778 if (vsi_handle >= ICE_MAX_VSI)
2779 return ICE_ERR_PARAM;
2780 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2783 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2785 /* Create a new VSI context */
2786 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2787 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2789 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2790 return ICE_ERR_NO_MEMORY;
2792 *tmp_vsi_ctx = *vsi_ctx;
2794 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2796 /* update with new HW VSI num */
2797 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2804 * ice_free_vsi- free VSI context from hardware and VSI handle list
2805 * @hw: pointer to the HW struct
2806 * @vsi_handle: unique VSI handle
2807 * @vsi_ctx: pointer to a VSI context struct
2808 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2809 * @cd: pointer to command details structure or NULL
2811 * Free VSI context info from hardware as well as from VSI handle list
2814 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2815 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2817 enum ice_status status;
2819 if (!ice_is_vsi_valid(hw, vsi_handle))
2820 return ICE_ERR_PARAM;
2821 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2822 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2824 ice_clear_vsi_ctx(hw, vsi_handle);
2830 * @hw: pointer to the HW struct
2831 * @vsi_handle: unique VSI handle
2832 * @vsi_ctx: pointer to a VSI context struct
2833 * @cd: pointer to command details structure or NULL
2835 * Update VSI context in the hardware
2838 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2839 struct ice_sq_cd *cd)
2841 if (!ice_is_vsi_valid(hw, vsi_handle))
2842 return ICE_ERR_PARAM;
2843 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2844 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2848 * ice_aq_get_vsi_params
2849 * @hw: pointer to the HW struct
2850 * @vsi_ctx: pointer to a VSI context struct
2851 * @cd: pointer to command details structure or NULL
2853 * Get VSI context info from hardware (0x0212)
2856 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2857 struct ice_sq_cd *cd)
2859 struct ice_aqc_add_get_update_free_vsi *cmd;
2860 struct ice_aqc_get_vsi_resp *resp;
2861 struct ice_aq_desc desc;
2862 enum ice_status status;
2864 cmd = &desc.params.vsi_cmd;
2865 resp = &desc.params.get_vsi_resp;
2867 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2869 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2871 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2872 sizeof(vsi_ctx->info), cd);
2874 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2876 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2877 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2884 * ice_aq_add_update_mir_rule - add/update a mirror rule
2885 * @hw: pointer to the HW struct
2886 * @rule_type: Rule Type
2887 * @dest_vsi: VSI number to which packets will be mirrored
2888 * @count: length of the list
2889 * @mr_buf: buffer for list of mirrored VSI numbers
2890 * @cd: pointer to command details structure or NULL
2893 * Add/Update Mirror Rule (0x260).
2896 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2897 u16 count, struct ice_mir_rule_buf *mr_buf,
2898 struct ice_sq_cd *cd, u16 *rule_id)
2900 struct ice_aqc_add_update_mir_rule *cmd;
2901 struct ice_aq_desc desc;
2902 enum ice_status status;
2903 __le16 *mr_list = NULL;
2906 switch (rule_type) {
2907 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2908 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2909 /* Make sure count and mr_buf are set for these rule_types */
2910 if (!(count && mr_buf))
2911 return ICE_ERR_PARAM;
2913 buf_size = count * sizeof(__le16);
2914 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2916 return ICE_ERR_NO_MEMORY;
2918 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2919 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2920 /* Make sure count and mr_buf are not set for these
2923 if (count || mr_buf)
2924 return ICE_ERR_PARAM;
2927 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2928 return ICE_ERR_OUT_OF_RANGE;
2931 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2933 /* Pre-process 'mr_buf' items for add/update of virtual port
2934 * ingress/egress mirroring (but not physical port ingress/egress
2940 for (i = 0; i < count; i++) {
2943 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2945 /* Validate specified VSI number, make sure it is less
2946 * than ICE_MAX_VSI, if not return with error.
2948 if (id >= ICE_MAX_VSI) {
2949 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2951 ice_free(hw, mr_list);
2952 return ICE_ERR_OUT_OF_RANGE;
2955 /* add VSI to mirror rule */
2958 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2959 else /* remove VSI from mirror rule */
2960 mr_list[i] = CPU_TO_LE16(id);
2964 cmd = &desc.params.add_update_rule;
2965 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2966 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2967 ICE_AQC_RULE_ID_VALID_M);
2968 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2969 cmd->num_entries = CPU_TO_LE16(count);
2970 cmd->dest = CPU_TO_LE16(dest_vsi);
2972 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2974 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2976 ice_free(hw, mr_list);
2982 * ice_aq_delete_mir_rule - delete a mirror rule
2983 * @hw: pointer to the HW struct
2984 * @rule_id: Mirror rule ID (to be deleted)
2985 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2986 * otherwise it is returned to the shared pool
2987 * @cd: pointer to command details structure or NULL
2989 * Delete Mirror Rule (0x261).
2992 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2993 struct ice_sq_cd *cd)
2995 struct ice_aqc_delete_mir_rule *cmd;
2996 struct ice_aq_desc desc;
2998 /* rule_id should be in the range 0...63 */
2999 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3000 return ICE_ERR_OUT_OF_RANGE;
3002 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3004 cmd = &desc.params.del_rule;
3005 rule_id |= ICE_AQC_RULE_ID_VALID_M;
3006 cmd->rule_id = CPU_TO_LE16(rule_id);
3009 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3011 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3015 * ice_aq_alloc_free_vsi_list
3016 * @hw: pointer to the HW struct
3017 * @vsi_list_id: VSI list ID returned or used for lookup
3018 * @lkup_type: switch rule filter lookup type
3019 * @opc: switch rules population command type - pass in the command opcode
3021 * allocates or free a VSI list resource
3023 static enum ice_status
3024 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3025 enum ice_sw_lkup_type lkup_type,
3026 enum ice_adminq_opc opc)
3028 struct ice_aqc_alloc_free_res_elem *sw_buf;
3029 struct ice_aqc_res_elem *vsi_ele;
3030 enum ice_status status;
3033 buf_len = ice_struct_size(sw_buf, elem, 1);
3034 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3036 return ICE_ERR_NO_MEMORY;
3037 sw_buf->num_elems = CPU_TO_LE16(1);
3039 if (lkup_type == ICE_SW_LKUP_MAC ||
3040 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3041 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3042 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3043 lkup_type == ICE_SW_LKUP_PROMISC ||
3044 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3045 lkup_type == ICE_SW_LKUP_LAST) {
3046 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3047 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3049 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3051 status = ICE_ERR_PARAM;
3052 goto ice_aq_alloc_free_vsi_list_exit;
3055 if (opc == ice_aqc_opc_free_res)
3056 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3058 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3060 goto ice_aq_alloc_free_vsi_list_exit;
3062 if (opc == ice_aqc_opc_alloc_res) {
3063 vsi_ele = &sw_buf->elem[0];
3064 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3067 ice_aq_alloc_free_vsi_list_exit:
3068 ice_free(hw, sw_buf);
3073 * ice_aq_set_storm_ctrl - Sets storm control configuration
3074 * @hw: pointer to the HW struct
3075 * @bcast_thresh: represents the upper threshold for broadcast storm control
3076 * @mcast_thresh: represents the upper threshold for multicast storm control
3077 * @ctl_bitmask: storm control knobs
3079 * Sets the storm control configuration (0x0280)
3082 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3085 struct ice_aqc_storm_cfg *cmd;
3086 struct ice_aq_desc desc;
3088 cmd = &desc.params.storm_conf;
3090 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3092 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3093 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3094 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3096 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3100 * ice_aq_get_storm_ctrl - gets storm control configuration
3101 * @hw: pointer to the HW struct
3102 * @bcast_thresh: represents the upper threshold for broadcast storm control
3103 * @mcast_thresh: represents the upper threshold for multicast storm control
3104 * @ctl_bitmask: storm control knobs
3106 * Gets the storm control configuration (0x0281)
3109 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3112 enum ice_status status;
3113 struct ice_aq_desc desc;
3115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3117 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3119 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3122 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3125 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3128 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3135 * ice_aq_sw_rules - add/update/remove switch rules
3136 * @hw: pointer to the HW struct
3137 * @rule_list: pointer to switch rule population list
3138 * @rule_list_sz: total size of the rule list in bytes
3139 * @num_rules: number of switch rules in the rule_list
3140 * @opc: switch rules population command type - pass in the command opcode
3141 * @cd: pointer to command details structure or NULL
3143 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3145 static enum ice_status
3146 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3147 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3149 struct ice_aq_desc desc;
3150 enum ice_status status;
3152 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3154 if (opc != ice_aqc_opc_add_sw_rules &&
3155 opc != ice_aqc_opc_update_sw_rules &&
3156 opc != ice_aqc_opc_remove_sw_rules)
3157 return ICE_ERR_PARAM;
3159 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3161 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3162 desc.params.sw_rules.num_rules_fltr_entry_index =
3163 CPU_TO_LE16(num_rules);
3164 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3165 if (opc != ice_aqc_opc_add_sw_rules &&
3166 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3167 status = ICE_ERR_DOES_NOT_EXIST;
3173 * ice_aq_add_recipe - add switch recipe
3174 * @hw: pointer to the HW struct
3175 * @s_recipe_list: pointer to switch rule population list
3176 * @num_recipes: number of switch recipes in the list
3177 * @cd: pointer to command details structure or NULL
3182 ice_aq_add_recipe(struct ice_hw *hw,
3183 struct ice_aqc_recipe_data_elem *s_recipe_list,
3184 u16 num_recipes, struct ice_sq_cd *cd)
3186 struct ice_aqc_add_get_recipe *cmd;
3187 struct ice_aq_desc desc;
3190 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3191 cmd = &desc.params.add_get_recipe;
3192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3194 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3195 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3197 buf_size = num_recipes * sizeof(*s_recipe_list);
3199 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3203 * ice_aq_get_recipe - get switch recipe
3204 * @hw: pointer to the HW struct
3205 * @s_recipe_list: pointer to switch rule population list
3206 * @num_recipes: pointer to the number of recipes (input and output)
3207 * @recipe_root: root recipe number of recipe(s) to retrieve
3208 * @cd: pointer to command details structure or NULL
3212 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3213 * On output, *num_recipes will equal the number of entries returned in
3216 * The caller must supply enough space in s_recipe_list to hold all possible
3217 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3220 ice_aq_get_recipe(struct ice_hw *hw,
3221 struct ice_aqc_recipe_data_elem *s_recipe_list,
3222 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3224 struct ice_aqc_add_get_recipe *cmd;
3225 struct ice_aq_desc desc;
3226 enum ice_status status;
3229 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3230 return ICE_ERR_PARAM;
3232 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3233 cmd = &desc.params.add_get_recipe;
3234 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3236 cmd->return_index = CPU_TO_LE16(recipe_root);
3237 cmd->num_sub_recipes = 0;
3239 buf_size = *num_recipes * sizeof(*s_recipe_list);
3241 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3242 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3248 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3249 * @hw: pointer to the HW struct
3250 * @params: parameters used to update the default recipe
3252 * This function only supports updating default recipes and it only supports
3253 * updating a single recipe based on the lkup_idx at a time.
3255 * This is done as a read-modify-write operation. First, get the current recipe
3256 * contents based on the recipe's ID. Then modify the field vector index and
3257 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3258 * the pre-existing recipe with the modifications.
3261 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3262 struct ice_update_recipe_lkup_idx_params *params)
3264 struct ice_aqc_recipe_data_elem *rcp_list;
3265 u16 num_recps = ICE_MAX_NUM_RECIPES;
3266 enum ice_status status;
3268 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3270 return ICE_ERR_NO_MEMORY;
3272 /* read current recipe list from firmware */
3273 rcp_list->recipe_indx = params->rid;
3274 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3276 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3277 params->rid, status);
3281 /* only modify existing recipe's lkup_idx and mask if valid, while
3282 * leaving all other fields the same, then update the recipe firmware
3284 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3285 if (params->mask_valid)
3286 rcp_list->content.mask[params->lkup_idx] =
3287 CPU_TO_LE16(params->mask);
3289 if (params->ignore_valid)
3290 rcp_list->content.lkup_indx[params->lkup_idx] |=
3291 ICE_AQ_RECIPE_LKUP_IGNORE;
3293 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3295 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3296 params->rid, params->lkup_idx, params->fv_idx,
3297 params->mask, params->mask_valid ? "true" : "false",
3301 ice_free(hw, rcp_list);
3306 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3307 * @hw: pointer to the HW struct
3308 * @profile_id: package profile ID to associate the recipe with
3309 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3310 * @cd: pointer to command details structure or NULL
3311 * Recipe to profile association (0x0291)
3314 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3315 struct ice_sq_cd *cd)
3317 struct ice_aqc_recipe_to_profile *cmd;
3318 struct ice_aq_desc desc;
3320 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3321 cmd = &desc.params.recipe_to_profile;
3322 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3323 cmd->profile_id = CPU_TO_LE16(profile_id);
3324 /* Set the recipe ID bit in the bitmask to let the device know which
3325 * profile we are associating the recipe to
3327 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3328 ICE_NONDMA_TO_NONDMA);
3330 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3334 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3335 * @hw: pointer to the HW struct
3336 * @profile_id: package profile ID to associate the recipe with
3337 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3338 * @cd: pointer to command details structure or NULL
3339 * Associate profile ID with given recipe (0x0293)
3342 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3343 struct ice_sq_cd *cd)
3345 struct ice_aqc_recipe_to_profile *cmd;
3346 struct ice_aq_desc desc;
3347 enum ice_status status;
3349 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3350 cmd = &desc.params.recipe_to_profile;
3351 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3352 cmd->profile_id = CPU_TO_LE16(profile_id);
3354 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3356 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3357 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3363 * ice_alloc_recipe - add recipe resource
3364 * @hw: pointer to the hardware structure
3365 * @rid: recipe ID returned as response to AQ call
3367 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3369 struct ice_aqc_alloc_free_res_elem *sw_buf;
3370 enum ice_status status;
3373 buf_len = ice_struct_size(sw_buf, elem, 1);
3374 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3376 return ICE_ERR_NO_MEMORY;
3378 sw_buf->num_elems = CPU_TO_LE16(1);
3379 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3380 ICE_AQC_RES_TYPE_S) |
3381 ICE_AQC_RES_TYPE_FLAG_SHARED);
3382 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3383 ice_aqc_opc_alloc_res, NULL);
3385 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3386 ice_free(hw, sw_buf);
3391 /* ice_init_port_info - Initialize port_info with switch configuration data
3392 * @pi: pointer to port_info
3393 * @vsi_port_num: VSI number or port number
3394 * @type: Type of switch element (port or VSI)
3395 * @swid: switch ID of the switch the element is attached to
3396 * @pf_vf_num: PF or VF number
3397 * @is_vf: true if the element is a VF, false otherwise
3400 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3401 u16 swid, u16 pf_vf_num, bool is_vf)
3404 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3405 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3407 pi->pf_vf_num = pf_vf_num;
3409 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3410 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3413 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3418 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3419 * @hw: pointer to the hardware structure
3421 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3423 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3424 enum ice_status status;
3431 num_total_ports = 1;
3433 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3434 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3437 return ICE_ERR_NO_MEMORY;
3439 /* Multiple calls to ice_aq_get_sw_cfg may be required
3440 * to get all the switch configuration information. The need
3441 * for additional calls is indicated by ice_aq_get_sw_cfg
3442 * writing a non-zero value in req_desc
3445 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3447 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3448 &req_desc, &num_elems, NULL);
3453 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3454 u16 pf_vf_num, swid, vsi_port_num;
3458 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3459 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3461 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3462 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3464 swid = LE16_TO_CPU(ele->swid);
3466 if (LE16_TO_CPU(ele->pf_vf_num) &
3467 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3470 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3471 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3474 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3475 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3476 if (j == num_total_ports) {
3477 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3478 status = ICE_ERR_CFG;
3481 ice_init_port_info(hw->port_info,
3482 vsi_port_num, res_type, swid,
3490 } while (req_desc && !status);
3498 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3499 * @hw: pointer to the hardware structure
3500 * @fi: filter info structure to fill/update
3502 * This helper function populates the lb_en and lan_en elements of the provided
3503 * ice_fltr_info struct using the switch's type and characteristics of the
3504 * switch rule being configured.
3506 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3508 if ((fi->flag & ICE_FLTR_RX) &&
3509 (fi->fltr_act == ICE_FWD_TO_VSI ||
3510 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3511 fi->lkup_type == ICE_SW_LKUP_LAST)
3515 if ((fi->flag & ICE_FLTR_TX) &&
3516 (fi->fltr_act == ICE_FWD_TO_VSI ||
3517 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3518 fi->fltr_act == ICE_FWD_TO_Q ||
3519 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3520 /* Setting LB for prune actions will result in replicated
3521 * packets to the internal switch that will be dropped.
3523 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3526 /* Set lan_en to TRUE if
3527 * 1. The switch is a VEB AND
3529 * 2.1 The lookup is a directional lookup like ethertype,
3530 * promiscuous, ethertype-MAC, promiscuous-VLAN
3531 * and default-port OR
3532 * 2.2 The lookup is VLAN, OR
3533 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3534 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3538 * The switch is a VEPA.
3540 * In all other cases, the LAN enable has to be set to false.
3543 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3544 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3545 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3546 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3547 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3548 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3549 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3550 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3551 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3552 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3561 * ice_fill_sw_rule - Helper function to fill switch rule structure
3562 * @hw: pointer to the hardware structure
3563 * @f_info: entry containing packet forwarding information
3564 * @s_rule: switch rule structure to be filled in based on mac_entry
3565 * @opc: switch rules population command type - pass in the command opcode
3568 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3569 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3571 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3572 u16 vlan_tpid = ICE_ETH_P_8021Q;
3580 if (opc == ice_aqc_opc_remove_sw_rules) {
3581 s_rule->pdata.lkup_tx_rx.act = 0;
3582 s_rule->pdata.lkup_tx_rx.index =
3583 CPU_TO_LE16(f_info->fltr_rule_id);
3584 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3588 eth_hdr_sz = sizeof(dummy_eth_header);
3589 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3591 /* initialize the ether header with a dummy header */
3592 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3593 ice_fill_sw_info(hw, f_info);
3595 switch (f_info->fltr_act) {
3596 case ICE_FWD_TO_VSI:
3597 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3598 ICE_SINGLE_ACT_VSI_ID_M;
3599 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3600 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3601 ICE_SINGLE_ACT_VALID_BIT;
3603 case ICE_FWD_TO_VSI_LIST:
3604 act |= ICE_SINGLE_ACT_VSI_LIST;
3605 act |= (f_info->fwd_id.vsi_list_id <<
3606 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3607 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3608 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3609 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3610 ICE_SINGLE_ACT_VALID_BIT;
3613 act |= ICE_SINGLE_ACT_TO_Q;
3614 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3615 ICE_SINGLE_ACT_Q_INDEX_M;
3617 case ICE_DROP_PACKET:
3618 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3619 ICE_SINGLE_ACT_VALID_BIT;
3621 case ICE_FWD_TO_QGRP:
3622 q_rgn = f_info->qgrp_size > 0 ?
3623 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3624 act |= ICE_SINGLE_ACT_TO_Q;
3625 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3626 ICE_SINGLE_ACT_Q_INDEX_M;
3627 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3628 ICE_SINGLE_ACT_Q_REGION_M;
3635 act |= ICE_SINGLE_ACT_LB_ENABLE;
3637 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3639 switch (f_info->lkup_type) {
3640 case ICE_SW_LKUP_MAC:
3641 daddr = f_info->l_data.mac.mac_addr;
3643 case ICE_SW_LKUP_VLAN:
3644 vlan_id = f_info->l_data.vlan.vlan_id;
3645 if (f_info->l_data.vlan.tpid_valid)
3646 vlan_tpid = f_info->l_data.vlan.tpid;
3647 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3648 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3649 act |= ICE_SINGLE_ACT_PRUNE;
3650 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3653 case ICE_SW_LKUP_ETHERTYPE_MAC:
3654 daddr = f_info->l_data.ethertype_mac.mac_addr;
3656 case ICE_SW_LKUP_ETHERTYPE:
3657 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3658 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3660 case ICE_SW_LKUP_MAC_VLAN:
3661 daddr = f_info->l_data.mac_vlan.mac_addr;
3662 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3664 case ICE_SW_LKUP_PROMISC_VLAN:
3665 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3667 case ICE_SW_LKUP_PROMISC:
3668 daddr = f_info->l_data.mac_vlan.mac_addr;
3674 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3675 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3676 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3678 /* Recipe set depending on lookup type */
3679 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3680 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3681 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3684 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3685 ICE_NONDMA_TO_NONDMA);
3687 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3688 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3689 *off = CPU_TO_BE16(vlan_id);
3690 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3691 *off = CPU_TO_BE16(vlan_tpid);
3694 /* Create the switch rule with the final dummy Ethernet header */
3695 if (opc != ice_aqc_opc_update_sw_rules)
3696 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3700 * ice_add_marker_act
3701 * @hw: pointer to the hardware structure
3702 * @m_ent: the management entry for which sw marker needs to be added
3703 * @sw_marker: sw marker to tag the Rx descriptor with
3704 * @l_id: large action resource ID
3706 * Create a large action to hold software marker and update the switch rule
3707 * entry pointed by m_ent with newly created large action
3709 static enum ice_status
3710 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3711 u16 sw_marker, u16 l_id)
3713 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3714 /* For software marker we need 3 large actions
3715 * 1. FWD action: FWD TO VSI or VSI LIST
3716 * 2. GENERIC VALUE action to hold the profile ID
3717 * 3. GENERIC VALUE action to hold the software marker ID
3719 const u16 num_lg_acts = 3;
3720 enum ice_status status;
3726 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3727 return ICE_ERR_PARAM;
3729 /* Create two back-to-back switch rules and submit them to the HW using
3730 * one memory buffer:
3734 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3735 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3736 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3738 return ICE_ERR_NO_MEMORY;
3740 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3742 /* Fill in the first switch rule i.e. large action */
3743 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3744 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3745 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3747 /* First action VSI forwarding or VSI list forwarding depending on how
3750 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3751 m_ent->fltr_info.fwd_id.hw_vsi_id;
3753 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3754 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3755 if (m_ent->vsi_count > 1)
3756 act |= ICE_LG_ACT_VSI_LIST;
3757 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3759 /* Second action descriptor type */
3760 act = ICE_LG_ACT_GENERIC;
3762 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3763 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3765 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3766 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3768 /* Third action Marker value */
3769 act |= ICE_LG_ACT_GENERIC;
3770 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3771 ICE_LG_ACT_GENERIC_VALUE_M;
3773 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3775 /* call the fill switch rule to fill the lookup Tx Rx structure */
3776 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3777 ice_aqc_opc_update_sw_rules);
3779 /* Update the action to point to the large action ID */
3780 rx_tx->pdata.lkup_tx_rx.act =
3781 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3782 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3783 ICE_SINGLE_ACT_PTR_VAL_M));
3785 /* Use the filter rule ID of the previously created rule with single
3786 * act. Once the update happens, hardware will treat this as large
3789 rx_tx->pdata.lkup_tx_rx.index =
3790 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3792 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3793 ice_aqc_opc_update_sw_rules, NULL);
3795 m_ent->lg_act_idx = l_id;
3796 m_ent->sw_marker_id = sw_marker;
3799 ice_free(hw, lg_act);
3804 * ice_add_counter_act - add/update filter rule with counter action
3805 * @hw: pointer to the hardware structure
3806 * @m_ent: the management entry for which counter needs to be added
3807 * @counter_id: VLAN counter ID returned as part of allocate resource
3808 * @l_id: large action resource ID
3810 static enum ice_status
3811 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3812 u16 counter_id, u16 l_id)
3814 struct ice_aqc_sw_rules_elem *lg_act;
3815 struct ice_aqc_sw_rules_elem *rx_tx;
3816 enum ice_status status;
3817 /* 2 actions will be added while adding a large action counter */
3818 const int num_acts = 2;
3825 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3826 return ICE_ERR_PARAM;
3828 /* Create two back-to-back switch rules and submit them to the HW using
3829 * one memory buffer:
3833 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3834 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3835 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3837 return ICE_ERR_NO_MEMORY;
3839 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3841 /* Fill in the first switch rule i.e. large action */
3842 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3843 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3844 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3846 /* First action VSI forwarding or VSI list forwarding depending on how
3849 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3850 m_ent->fltr_info.fwd_id.hw_vsi_id;
3852 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3853 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3854 ICE_LG_ACT_VSI_LIST_ID_M;
3855 if (m_ent->vsi_count > 1)
3856 act |= ICE_LG_ACT_VSI_LIST;
3857 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3859 /* Second action counter ID */
3860 act = ICE_LG_ACT_STAT_COUNT;
3861 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3862 ICE_LG_ACT_STAT_COUNT_M;
3863 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3865 /* call the fill switch rule to fill the lookup Tx Rx structure */
3866 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3867 ice_aqc_opc_update_sw_rules);
3869 act = ICE_SINGLE_ACT_PTR;
3870 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3871 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3873 /* Use the filter rule ID of the previously created rule with single
3874 * act. Once the update happens, hardware will treat this as large
3877 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3878 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3880 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3881 ice_aqc_opc_update_sw_rules, NULL);
3883 m_ent->lg_act_idx = l_id;
3884 m_ent->counter_index = counter_id;
3887 ice_free(hw, lg_act);
3892 * ice_create_vsi_list_map
3893 * @hw: pointer to the hardware structure
3894 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3895 * @num_vsi: number of VSI handles in the array
3896 * @vsi_list_id: VSI list ID generated as part of allocate resource
3898 * Helper function to create a new entry of VSI list ID to VSI mapping
3899 * using the given VSI list ID
3901 static struct ice_vsi_list_map_info *
3902 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3905 struct ice_switch_info *sw = hw->switch_info;
3906 struct ice_vsi_list_map_info *v_map;
3909 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3913 v_map->vsi_list_id = vsi_list_id;
3915 for (i = 0; i < num_vsi; i++)
3916 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3918 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3923 * ice_update_vsi_list_rule
3924 * @hw: pointer to the hardware structure
3925 * @vsi_handle_arr: array of VSI handles to form a VSI list
3926 * @num_vsi: number of VSI handles in the array
3927 * @vsi_list_id: VSI list ID generated as part of allocate resource
3928 * @remove: Boolean value to indicate if this is a remove action
3929 * @opc: switch rules population command type - pass in the command opcode
3930 * @lkup_type: lookup type of the filter
3932 * Call AQ command to add a new switch rule or update existing switch rule
3933 * using the given VSI list ID
3935 static enum ice_status
3936 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3937 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3938 enum ice_sw_lkup_type lkup_type)
3940 struct ice_aqc_sw_rules_elem *s_rule;
3941 enum ice_status status;
3947 return ICE_ERR_PARAM;
3949 if (lkup_type == ICE_SW_LKUP_MAC ||
3950 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3951 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3952 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3953 lkup_type == ICE_SW_LKUP_PROMISC ||
3954 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3955 lkup_type == ICE_SW_LKUP_LAST)
3956 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3957 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3958 else if (lkup_type == ICE_SW_LKUP_VLAN)
3959 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3960 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3962 return ICE_ERR_PARAM;
3964 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3965 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3967 return ICE_ERR_NO_MEMORY;
3968 for (i = 0; i < num_vsi; i++) {
3969 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3970 status = ICE_ERR_PARAM;
3973 /* AQ call requires hw_vsi_id(s) */
3974 s_rule->pdata.vsi_list.vsi[i] =
3975 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3978 s_rule->type = CPU_TO_LE16(rule_type);
3979 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3980 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3982 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3985 ice_free(hw, s_rule);
3990 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3991 * @hw: pointer to the HW struct
3992 * @vsi_handle_arr: array of VSI handles to form a VSI list
3993 * @num_vsi: number of VSI handles in the array
3994 * @vsi_list_id: stores the ID of the VSI list to be created
3995 * @lkup_type: switch rule filter's lookup type
3997 static enum ice_status
3998 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3999 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4001 enum ice_status status;
4003 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4004 ice_aqc_opc_alloc_res);
4008 /* Update the newly created VSI list to include the specified VSIs */
4009 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4010 *vsi_list_id, false,
4011 ice_aqc_opc_add_sw_rules, lkup_type);
4015 * ice_create_pkt_fwd_rule
4016 * @hw: pointer to the hardware structure
4017 * @recp_list: corresponding filter management list
4018 * @f_entry: entry containing packet forwarding information
4020 * Create switch rule with given filter information and add an entry
4021 * to the corresponding filter management list to track this switch rule
4024 static enum ice_status
4025 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4026 struct ice_fltr_list_entry *f_entry)
4028 struct ice_fltr_mgmt_list_entry *fm_entry;
4029 struct ice_aqc_sw_rules_elem *s_rule;
4030 enum ice_status status;
4032 s_rule = (struct ice_aqc_sw_rules_elem *)
4033 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4035 return ICE_ERR_NO_MEMORY;
4036 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4037 ice_malloc(hw, sizeof(*fm_entry));
4039 status = ICE_ERR_NO_MEMORY;
4040 goto ice_create_pkt_fwd_rule_exit;
4043 fm_entry->fltr_info = f_entry->fltr_info;
4045 /* Initialize all the fields for the management entry */
4046 fm_entry->vsi_count = 1;
4047 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4048 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4049 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4051 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4052 ice_aqc_opc_add_sw_rules);
4054 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4055 ice_aqc_opc_add_sw_rules, NULL);
4057 ice_free(hw, fm_entry);
4058 goto ice_create_pkt_fwd_rule_exit;
4061 f_entry->fltr_info.fltr_rule_id =
4062 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4063 fm_entry->fltr_info.fltr_rule_id =
4064 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4066 /* The book keeping entries will get removed when base driver
4067 * calls remove filter AQ command
4069 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4071 ice_create_pkt_fwd_rule_exit:
4072 ice_free(hw, s_rule);
4077 * ice_update_pkt_fwd_rule
4078 * @hw: pointer to the hardware structure
4079 * @f_info: filter information for switch rule
4081 * Call AQ command to update a previously created switch rule with a
4084 static enum ice_status
4085 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4087 struct ice_aqc_sw_rules_elem *s_rule;
4088 enum ice_status status;
4090 s_rule = (struct ice_aqc_sw_rules_elem *)
4091 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4093 return ICE_ERR_NO_MEMORY;
4095 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4097 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4099 /* Update switch rule with new rule set to forward VSI list */
4100 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4101 ice_aqc_opc_update_sw_rules, NULL);
4103 ice_free(hw, s_rule);
4108 * ice_update_sw_rule_bridge_mode
4109 * @hw: pointer to the HW struct
4111 * Updates unicast switch filter rules based on VEB/VEPA mode
4113 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4115 struct ice_switch_info *sw = hw->switch_info;
4116 struct ice_fltr_mgmt_list_entry *fm_entry;
4117 enum ice_status status = ICE_SUCCESS;
4118 struct LIST_HEAD_TYPE *rule_head;
4119 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4121 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4122 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4124 ice_acquire_lock(rule_lock);
4125 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4127 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4128 u8 *addr = fi->l_data.mac.mac_addr;
4130 /* Update unicast Tx rules to reflect the selected
4133 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4134 (fi->fltr_act == ICE_FWD_TO_VSI ||
4135 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4136 fi->fltr_act == ICE_FWD_TO_Q ||
4137 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4138 status = ice_update_pkt_fwd_rule(hw, fi);
4144 ice_release_lock(rule_lock);
4150 * ice_add_update_vsi_list
4151 * @hw: pointer to the hardware structure
4152 * @m_entry: pointer to current filter management list entry
4153 * @cur_fltr: filter information from the book keeping entry
4154 * @new_fltr: filter information with the new VSI to be added
4156 * Call AQ command to add or update previously created VSI list with new VSI.
4158 * Helper function to do book keeping associated with adding filter information
4159 * The algorithm to do the book keeping is described below :
4160 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4161 * if only one VSI has been added till now
4162 * Allocate a new VSI list and add two VSIs
4163 * to this list using switch rule command
4164 * Update the previously created switch rule with the
4165 * newly created VSI list ID
4166 * if a VSI list was previously created
4167 * Add the new VSI to the previously created VSI list set
4168 * using the update switch rule command
4170 static enum ice_status
4171 ice_add_update_vsi_list(struct ice_hw *hw,
4172 struct ice_fltr_mgmt_list_entry *m_entry,
4173 struct ice_fltr_info *cur_fltr,
4174 struct ice_fltr_info *new_fltr)
4176 enum ice_status status = ICE_SUCCESS;
4177 u16 vsi_list_id = 0;
4179 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4180 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4181 return ICE_ERR_NOT_IMPL;
4183 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4184 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4185 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4186 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4187 return ICE_ERR_NOT_IMPL;
4189 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4190 /* Only one entry existed in the mapping and it was not already
4191 * a part of a VSI list. So, create a VSI list with the old and
4194 struct ice_fltr_info tmp_fltr;
4195 u16 vsi_handle_arr[2];
4197 /* A rule already exists with the new VSI being added */
4198 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4199 return ICE_ERR_ALREADY_EXISTS;
4201 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4202 vsi_handle_arr[1] = new_fltr->vsi_handle;
4203 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4205 new_fltr->lkup_type);
4209 tmp_fltr = *new_fltr;
4210 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4211 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4212 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4213 /* Update the previous switch rule of "MAC forward to VSI" to
4214 * "MAC fwd to VSI list"
4216 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4220 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4221 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4222 m_entry->vsi_list_info =
4223 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4226 if (!m_entry->vsi_list_info)
4227 return ICE_ERR_NO_MEMORY;
4229 /* If this entry was large action then the large action needs
4230 * to be updated to point to FWD to VSI list
4232 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4234 ice_add_marker_act(hw, m_entry,
4235 m_entry->sw_marker_id,
4236 m_entry->lg_act_idx);
4238 u16 vsi_handle = new_fltr->vsi_handle;
4239 enum ice_adminq_opc opcode;
4241 if (!m_entry->vsi_list_info)
4244 /* A rule already exists with the new VSI being added */
4245 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4248 /* Update the previously created VSI list set with
4249 * the new VSI ID passed in
4251 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4252 opcode = ice_aqc_opc_update_sw_rules;
4254 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4255 vsi_list_id, false, opcode,
4256 new_fltr->lkup_type);
4257 /* update VSI list mapping info with new VSI ID */
4259 ice_set_bit(vsi_handle,
4260 m_entry->vsi_list_info->vsi_map);
4263 m_entry->vsi_count++;
4268 * ice_find_rule_entry - Search a rule entry
4269 * @list_head: head of rule list
4270 * @f_info: rule information
4272 * Helper function to search for a given rule entry
4273 * Returns pointer to entry storing the rule if found
4275 static struct ice_fltr_mgmt_list_entry *
4276 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4277 struct ice_fltr_info *f_info)
4279 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4281 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4283 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4284 sizeof(f_info->l_data)) &&
4285 f_info->flag == list_itr->fltr_info.flag) {
4294 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4295 * @recp_list: VSI lists needs to be searched
4296 * @vsi_handle: VSI handle to be found in VSI list
4297 * @vsi_list_id: VSI list ID found containing vsi_handle
4299 * Helper function to search a VSI list with single entry containing given VSI
4300 * handle element. This can be extended further to search VSI list with more
4301 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4303 static struct ice_vsi_list_map_info *
4304 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4307 struct ice_vsi_list_map_info *map_info = NULL;
4308 struct LIST_HEAD_TYPE *list_head;
4310 list_head = &recp_list->filt_rules;
4311 if (recp_list->adv_rule) {
4312 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4314 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4315 ice_adv_fltr_mgmt_list_entry,
4317 if (list_itr->vsi_list_info) {
4318 map_info = list_itr->vsi_list_info;
4319 if (ice_is_bit_set(map_info->vsi_map,
4321 *vsi_list_id = map_info->vsi_list_id;
4327 struct ice_fltr_mgmt_list_entry *list_itr;
4329 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4330 ice_fltr_mgmt_list_entry,
4332 if (list_itr->vsi_count == 1 &&
4333 list_itr->vsi_list_info) {
4334 map_info = list_itr->vsi_list_info;
4335 if (ice_is_bit_set(map_info->vsi_map,
4337 *vsi_list_id = map_info->vsi_list_id;
4347 * ice_add_rule_internal - add rule for a given lookup type
4348 * @hw: pointer to the hardware structure
4349 * @recp_list: recipe list for which rule has to be added
4350 * @lport: logic port number on which function add rule
4351 * @f_entry: structure containing MAC forwarding information
4353 * Adds or updates the rule lists for a given recipe
4355 static enum ice_status
4356 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4357 u8 lport, struct ice_fltr_list_entry *f_entry)
4359 struct ice_fltr_info *new_fltr, *cur_fltr;
4360 struct ice_fltr_mgmt_list_entry *m_entry;
4361 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4362 enum ice_status status = ICE_SUCCESS;
4364 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4365 return ICE_ERR_PARAM;
4367 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4368 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4369 f_entry->fltr_info.fwd_id.hw_vsi_id =
4370 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4372 rule_lock = &recp_list->filt_rule_lock;
4374 ice_acquire_lock(rule_lock);
4375 new_fltr = &f_entry->fltr_info;
4376 if (new_fltr->flag & ICE_FLTR_RX)
4377 new_fltr->src = lport;
4378 else if (new_fltr->flag & ICE_FLTR_TX)
4380 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4382 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4384 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4385 goto exit_add_rule_internal;
4388 cur_fltr = &m_entry->fltr_info;
4389 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4391 exit_add_rule_internal:
4392 ice_release_lock(rule_lock);
4397 * ice_remove_vsi_list_rule
4398 * @hw: pointer to the hardware structure
4399 * @vsi_list_id: VSI list ID generated as part of allocate resource
4400 * @lkup_type: switch rule filter lookup type
4402 * The VSI list should be emptied before this function is called to remove the
4405 static enum ice_status
4406 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4407 enum ice_sw_lkup_type lkup_type)
4409 /* Free the vsi_list resource that we allocated. It is assumed that the
4410 * list is empty at this point.
4412 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4413 ice_aqc_opc_free_res);
4417 * ice_rem_update_vsi_list
4418 * @hw: pointer to the hardware structure
4419 * @vsi_handle: VSI handle of the VSI to remove
4420 * @fm_list: filter management entry for which the VSI list management needs to
4423 static enum ice_status
4424 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4425 struct ice_fltr_mgmt_list_entry *fm_list)
4427 enum ice_sw_lkup_type lkup_type;
4428 enum ice_status status = ICE_SUCCESS;
4431 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4432 fm_list->vsi_count == 0)
4433 return ICE_ERR_PARAM;
4435 /* A rule with the VSI being removed does not exist */
4436 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4437 return ICE_ERR_DOES_NOT_EXIST;
4439 lkup_type = fm_list->fltr_info.lkup_type;
4440 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4441 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4442 ice_aqc_opc_update_sw_rules,
4447 fm_list->vsi_count--;
4448 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4450 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4451 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4452 struct ice_vsi_list_map_info *vsi_list_info =
4453 fm_list->vsi_list_info;
4456 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4458 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4459 return ICE_ERR_OUT_OF_RANGE;
4461 /* Make sure VSI list is empty before removing it below */
4462 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4464 ice_aqc_opc_update_sw_rules,
4469 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4470 tmp_fltr_info.fwd_id.hw_vsi_id =
4471 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4472 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4473 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4475 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4476 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4480 fm_list->fltr_info = tmp_fltr_info;
4483 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4484 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4485 struct ice_vsi_list_map_info *vsi_list_info =
4486 fm_list->vsi_list_info;
4488 /* Remove the VSI list since it is no longer used */
4489 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4491 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4492 vsi_list_id, status);
4496 LIST_DEL(&vsi_list_info->list_entry);
4497 ice_free(hw, vsi_list_info);
4498 fm_list->vsi_list_info = NULL;
4505 * ice_remove_rule_internal - Remove a filter rule of a given type
4507 * @hw: pointer to the hardware structure
4508 * @recp_list: recipe list for which the rule needs to removed
4509 * @f_entry: rule entry containing filter information
4511 static enum ice_status
4512 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4513 struct ice_fltr_list_entry *f_entry)
4515 struct ice_fltr_mgmt_list_entry *list_elem;
4516 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4517 enum ice_status status = ICE_SUCCESS;
4518 bool remove_rule = false;
4521 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4522 return ICE_ERR_PARAM;
4523 f_entry->fltr_info.fwd_id.hw_vsi_id =
4524 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4526 rule_lock = &recp_list->filt_rule_lock;
4527 ice_acquire_lock(rule_lock);
4528 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4529 &f_entry->fltr_info);
4531 status = ICE_ERR_DOES_NOT_EXIST;
4535 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4537 } else if (!list_elem->vsi_list_info) {
4538 status = ICE_ERR_DOES_NOT_EXIST;
4540 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4541 /* a ref_cnt > 1 indicates that the vsi_list is being
4542 * shared by multiple rules. Decrement the ref_cnt and
4543 * remove this rule, but do not modify the list, as it
4544 * is in-use by other rules.
4546 list_elem->vsi_list_info->ref_cnt--;
4549 /* a ref_cnt of 1 indicates the vsi_list is only used
4550 * by one rule. However, the original removal request is only
4551 * for a single VSI. Update the vsi_list first, and only
4552 * remove the rule if there are no further VSIs in this list.
4554 vsi_handle = f_entry->fltr_info.vsi_handle;
4555 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4558 /* if VSI count goes to zero after updating the VSI list */
4559 if (list_elem->vsi_count == 0)
4564 /* Remove the lookup rule */
4565 struct ice_aqc_sw_rules_elem *s_rule;
4567 s_rule = (struct ice_aqc_sw_rules_elem *)
4568 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4570 status = ICE_ERR_NO_MEMORY;
4574 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4575 ice_aqc_opc_remove_sw_rules);
4577 status = ice_aq_sw_rules(hw, s_rule,
4578 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4579 ice_aqc_opc_remove_sw_rules, NULL);
4581 /* Remove a book keeping from the list */
4582 ice_free(hw, s_rule);
4587 LIST_DEL(&list_elem->list_entry);
4588 ice_free(hw, list_elem);
4591 ice_release_lock(rule_lock);
4596 * ice_aq_get_res_alloc - get allocated resources
4597 * @hw: pointer to the HW struct
4598 * @num_entries: pointer to u16 to store the number of resource entries returned
4599 * @buf: pointer to buffer
4600 * @buf_size: size of buf
4601 * @cd: pointer to command details structure or NULL
4603 * The caller-supplied buffer must be large enough to store the resource
4604 * information for all resource types. Each resource type is an
4605 * ice_aqc_get_res_resp_elem structure.
4608 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4609 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4610 struct ice_sq_cd *cd)
4612 struct ice_aqc_get_res_alloc *resp;
4613 enum ice_status status;
4614 struct ice_aq_desc desc;
4617 return ICE_ERR_BAD_PTR;
4619 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4620 return ICE_ERR_INVAL_SIZE;
4622 resp = &desc.params.get_res;
4624 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4625 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4627 if (!status && num_entries)
4628 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4634 * ice_aq_get_res_descs - get allocated resource descriptors
4635 * @hw: pointer to the hardware structure
4636 * @num_entries: number of resource entries in buffer
4637 * @buf: structure to hold response data buffer
4638 * @buf_size: size of buffer
4639 * @res_type: resource type
4640 * @res_shared: is resource shared
4641 * @desc_id: input - first desc ID to start; output - next desc ID
4642 * @cd: pointer to command details structure or NULL
4645 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4646 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4647 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4649 struct ice_aqc_get_allocd_res_desc *cmd;
4650 struct ice_aq_desc desc;
4651 enum ice_status status;
4653 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4655 cmd = &desc.params.get_res_desc;
4658 return ICE_ERR_PARAM;
4660 if (buf_size != (num_entries * sizeof(*buf)))
4661 return ICE_ERR_PARAM;
4663 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4665 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4666 ICE_AQC_RES_TYPE_M) | (res_shared ?
4667 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4668 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4670 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4672 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4678 * ice_add_mac_rule - Add a MAC address based filter rule
4679 * @hw: pointer to the hardware structure
4680 * @m_list: list of MAC addresses and forwarding information
4681 * @sw: pointer to switch info struct for which function add rule
4682 * @lport: logic port number on which function add rule
4684 * IMPORTANT: When the umac_shared flag is set to false and m_list has
4685 * multiple unicast addresses, the function assumes that all the
4686 * addresses are unique in a given add_mac call. It doesn't
4687 * check for duplicates in this case, removing duplicates from a given
4688 * list should be taken care of in the caller of this function.
4690 static enum ice_status
4691 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4692 struct ice_switch_info *sw, u8 lport)
4694 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4695 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4696 struct ice_fltr_list_entry *m_list_itr;
4697 struct LIST_HEAD_TYPE *rule_head;
4698 u16 total_elem_left, s_rule_size;
4699 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4700 enum ice_status status = ICE_SUCCESS;
4701 u16 num_unicast = 0;
4705 rule_lock = &recp_list->filt_rule_lock;
4706 rule_head = &recp_list->filt_rules;
4708 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4710 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4714 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4715 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4716 if (!ice_is_vsi_valid(hw, vsi_handle))
4717 return ICE_ERR_PARAM;
4718 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4719 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4720 /* update the src in case it is VSI num */
4721 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4722 return ICE_ERR_PARAM;
4723 m_list_itr->fltr_info.src = hw_vsi_id;
4724 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4725 IS_ZERO_ETHER_ADDR(add))
4726 return ICE_ERR_PARAM;
4727 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
4728 /* Don't overwrite the unicast address */
4729 ice_acquire_lock(rule_lock);
4730 if (ice_find_rule_entry(rule_head,
4731 &m_list_itr->fltr_info)) {
4732 ice_release_lock(rule_lock);
4735 ice_release_lock(rule_lock);
4737 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4738 (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
4739 m_list_itr->status =
4740 ice_add_rule_internal(hw, recp_list, lport,
4742 if (m_list_itr->status)
4743 return m_list_itr->status;
4747 ice_acquire_lock(rule_lock);
4748 /* Exit if no suitable entries were found for adding bulk switch rule */
4750 status = ICE_SUCCESS;
4751 goto ice_add_mac_exit;
4754 /* Allocate switch rule buffer for the bulk update for unicast */
4755 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4756 s_rule = (struct ice_aqc_sw_rules_elem *)
4757 ice_calloc(hw, num_unicast, s_rule_size);
4759 status = ICE_ERR_NO_MEMORY;
4760 goto ice_add_mac_exit;
4764 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4766 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4767 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4769 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4770 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4771 ice_aqc_opc_add_sw_rules);
4772 r_iter = (struct ice_aqc_sw_rules_elem *)
4773 ((u8 *)r_iter + s_rule_size);
4777 /* Call AQ bulk switch rule update for all unicast addresses */
4779 /* Call AQ switch rule in AQ_MAX chunk */
4780 for (total_elem_left = num_unicast; total_elem_left > 0;
4781 total_elem_left -= elem_sent) {
4782 struct ice_aqc_sw_rules_elem *entry = r_iter;
4784 elem_sent = MIN_T(u8, total_elem_left,
4785 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4786 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4787 elem_sent, ice_aqc_opc_add_sw_rules,
4790 goto ice_add_mac_exit;
4791 r_iter = (struct ice_aqc_sw_rules_elem *)
4792 ((u8 *)r_iter + (elem_sent * s_rule_size));
4795 /* Fill up rule ID based on the value returned from FW */
4797 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4799 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4800 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4801 struct ice_fltr_mgmt_list_entry *fm_entry;
4803 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4804 f_info->fltr_rule_id =
4805 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4806 f_info->fltr_act = ICE_FWD_TO_VSI;
4807 /* Create an entry to track this MAC address */
4808 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4809 ice_malloc(hw, sizeof(*fm_entry));
4811 status = ICE_ERR_NO_MEMORY;
4812 goto ice_add_mac_exit;
4814 fm_entry->fltr_info = *f_info;
4815 fm_entry->vsi_count = 1;
4816 /* The book keeping entries will get removed when
4817 * base driver calls remove filter AQ command
4820 LIST_ADD(&fm_entry->list_entry, rule_head);
4821 r_iter = (struct ice_aqc_sw_rules_elem *)
4822 ((u8 *)r_iter + s_rule_size);
4827 ice_release_lock(rule_lock);
4829 ice_free(hw, s_rule);
4834 * ice_add_mac - Add a MAC address based filter rule
4835 * @hw: pointer to the hardware structure
4836 * @m_list: list of MAC addresses and forwarding information
4838 * Function add MAC rule for logical port from HW struct
4840 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4843 return ICE_ERR_PARAM;
4845 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4846 hw->port_info->lport);
4850 * ice_add_vlan_internal - Add one VLAN based filter rule
4851 * @hw: pointer to the hardware structure
4852 * @recp_list: recipe list for which rule has to be added
4853 * @f_entry: filter entry containing one VLAN information
4855 static enum ice_status
4856 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4857 struct ice_fltr_list_entry *f_entry)
4859 struct ice_fltr_mgmt_list_entry *v_list_itr;
4860 struct ice_fltr_info *new_fltr, *cur_fltr;
4861 enum ice_sw_lkup_type lkup_type;
4862 u16 vsi_list_id = 0, vsi_handle;
4863 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4864 enum ice_status status = ICE_SUCCESS;
4866 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4867 return ICE_ERR_PARAM;
4869 f_entry->fltr_info.fwd_id.hw_vsi_id =
4870 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4871 new_fltr = &f_entry->fltr_info;
4873 /* VLAN ID should only be 12 bits */
4874 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4875 return ICE_ERR_PARAM;
4877 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4878 return ICE_ERR_PARAM;
4880 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4881 lkup_type = new_fltr->lkup_type;
4882 vsi_handle = new_fltr->vsi_handle;
4883 rule_lock = &recp_list->filt_rule_lock;
4884 ice_acquire_lock(rule_lock);
4885 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4887 struct ice_vsi_list_map_info *map_info = NULL;
4889 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4890 /* All VLAN pruning rules use a VSI list. Check if
4891 * there is already a VSI list containing VSI that we
4892 * want to add. If found, use the same vsi_list_id for
4893 * this new VLAN rule or else create a new list.
4895 map_info = ice_find_vsi_list_entry(recp_list,
4899 status = ice_create_vsi_list_rule(hw,
4907 /* Convert the action to forwarding to a VSI list. */
4908 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4909 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4912 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4914 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4917 status = ICE_ERR_DOES_NOT_EXIST;
4920 /* reuse VSI list for new rule and increment ref_cnt */
4922 v_list_itr->vsi_list_info = map_info;
4923 map_info->ref_cnt++;
4925 v_list_itr->vsi_list_info =
4926 ice_create_vsi_list_map(hw, &vsi_handle,
4930 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4931 /* Update existing VSI list to add new VSI ID only if it used
4934 cur_fltr = &v_list_itr->fltr_info;
4935 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4938 /* If VLAN rule exists and VSI list being used by this rule is
4939 * referenced by more than 1 VLAN rule. Then create a new VSI
4940 * list appending previous VSI with new VSI and update existing
4941 * VLAN rule to point to new VSI list ID
4943 struct ice_fltr_info tmp_fltr;
4944 u16 vsi_handle_arr[2];
4947 /* Current implementation only supports reusing VSI list with
4948 * one VSI count. We should never hit below condition
4950 if (v_list_itr->vsi_count > 1 &&
4951 v_list_itr->vsi_list_info->ref_cnt > 1) {
4952 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4953 status = ICE_ERR_CFG;
4958 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4961 /* A rule already exists with the new VSI being added */
4962 if (cur_handle == vsi_handle) {
4963 status = ICE_ERR_ALREADY_EXISTS;
4967 vsi_handle_arr[0] = cur_handle;
4968 vsi_handle_arr[1] = vsi_handle;
4969 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4970 &vsi_list_id, lkup_type);
4974 tmp_fltr = v_list_itr->fltr_info;
4975 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4976 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4977 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4978 /* Update the previous switch rule to a new VSI list which
4979 * includes current VSI that is requested
4981 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4985 /* before overriding VSI list map info. decrement ref_cnt of
4988 v_list_itr->vsi_list_info->ref_cnt--;
4990 /* now update to newly created list */
4991 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4992 v_list_itr->vsi_list_info =
4993 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4995 v_list_itr->vsi_count++;
4999 ice_release_lock(rule_lock);
5004 * ice_add_vlan_rule - Add VLAN based filter rule
5005 * @hw: pointer to the hardware structure
5006 * @v_list: list of VLAN entries and forwarding information
5007 * @sw: pointer to switch info struct for which function add rule
5009 static enum ice_status
5010 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5011 struct ice_switch_info *sw)
5013 struct ice_fltr_list_entry *v_list_itr;
5014 struct ice_sw_recipe *recp_list;
5016 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5017 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5019 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5020 return ICE_ERR_PARAM;
5021 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5022 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5024 if (v_list_itr->status)
5025 return v_list_itr->status;
5031 * ice_add_vlan - Add a VLAN based filter rule
5032 * @hw: pointer to the hardware structure
5033 * @v_list: list of VLAN and forwarding information
5035 * Function add VLAN rule for logical port from HW struct
5037 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5040 return ICE_ERR_PARAM;
5042 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5046 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5047 * @hw: pointer to the hardware structure
5048 * @mv_list: list of MAC and VLAN filters
5049 * @sw: pointer to switch info struct for which function add rule
5050 * @lport: logic port number on which function add rule
5052 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5053 * pruning bits enabled, then it is the responsibility of the caller to make
5054 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5055 * VLAN won't be received on that VSI otherwise.
5057 static enum ice_status
5058 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5059 struct ice_switch_info *sw, u8 lport)
5061 struct ice_fltr_list_entry *mv_list_itr;
5062 struct ice_sw_recipe *recp_list;
5064 if (!mv_list || !hw)
5065 return ICE_ERR_PARAM;
5067 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5068 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5070 enum ice_sw_lkup_type l_type =
5071 mv_list_itr->fltr_info.lkup_type;
5073 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5074 return ICE_ERR_PARAM;
5075 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5076 mv_list_itr->status =
5077 ice_add_rule_internal(hw, recp_list, lport,
5079 if (mv_list_itr->status)
5080 return mv_list_itr->status;
5086 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5087 * @hw: pointer to the hardware structure
5088 * @mv_list: list of MAC VLAN addresses and forwarding information
5090 * Function add MAC VLAN rule for logical port from HW struct
5093 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5095 if (!mv_list || !hw)
5096 return ICE_ERR_PARAM;
5098 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5099 hw->port_info->lport);
5103 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5104 * @hw: pointer to the hardware structure
5105 * @em_list: list of ether type MAC filter, MAC is optional
5106 * @sw: pointer to switch info struct for which function add rule
5107 * @lport: logic port number on which function add rule
5109 * This function requires the caller to populate the entries in
5110 * the filter list with the necessary fields (including flags to
5111 * indicate Tx or Rx rules).
5113 static enum ice_status
5114 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5115 struct ice_switch_info *sw, u8 lport)
5117 struct ice_fltr_list_entry *em_list_itr;
5119 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5121 struct ice_sw_recipe *recp_list;
5122 enum ice_sw_lkup_type l_type;
5124 l_type = em_list_itr->fltr_info.lkup_type;
5125 recp_list = &sw->recp_list[l_type];
5127 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5128 l_type != ICE_SW_LKUP_ETHERTYPE)
5129 return ICE_ERR_PARAM;
5131 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5134 if (em_list_itr->status)
5135 return em_list_itr->status;
5141 * ice_add_eth_mac - Add a ethertype based filter rule
5142 * @hw: pointer to the hardware structure
5143 * @em_list: list of ethertype and forwarding information
5145 * Function add ethertype rule for logical port from HW struct
5148 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5150 if (!em_list || !hw)
5151 return ICE_ERR_PARAM;
5153 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5154 hw->port_info->lport);
5158 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5159 * @hw: pointer to the hardware structure
5160 * @em_list: list of ethertype or ethertype MAC entries
5161 * @sw: pointer to switch info struct for which function add rule
5163 static enum ice_status
5164 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5165 struct ice_switch_info *sw)
5167 struct ice_fltr_list_entry *em_list_itr, *tmp;
5169 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5171 struct ice_sw_recipe *recp_list;
5172 enum ice_sw_lkup_type l_type;
5174 l_type = em_list_itr->fltr_info.lkup_type;
5176 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5177 l_type != ICE_SW_LKUP_ETHERTYPE)
5178 return ICE_ERR_PARAM;
5180 recp_list = &sw->recp_list[l_type];
5181 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5183 if (em_list_itr->status)
5184 return em_list_itr->status;
5190 * ice_remove_eth_mac - remove a ethertype based filter rule
5191 * @hw: pointer to the hardware structure
5192 * @em_list: list of ethertype and forwarding information
5196 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5198 if (!em_list || !hw)
5199 return ICE_ERR_PARAM;
5201 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5205 * ice_rem_sw_rule_info
5206 * @hw: pointer to the hardware structure
5207 * @rule_head: pointer to the switch list structure that we want to delete
5210 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5212 if (!LIST_EMPTY(rule_head)) {
5213 struct ice_fltr_mgmt_list_entry *entry;
5214 struct ice_fltr_mgmt_list_entry *tmp;
5216 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5217 ice_fltr_mgmt_list_entry, list_entry) {
5218 LIST_DEL(&entry->list_entry);
5219 ice_free(hw, entry);
5225 * ice_rem_adv_rule_info
5226 * @hw: pointer to the hardware structure
5227 * @rule_head: pointer to the switch list structure that we want to delete
5230 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5232 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5233 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5235 if (LIST_EMPTY(rule_head))
5238 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5239 ice_adv_fltr_mgmt_list_entry, list_entry) {
5240 LIST_DEL(&lst_itr->list_entry);
5241 ice_free(hw, lst_itr->lkups);
5242 ice_free(hw, lst_itr);
5247 * ice_rem_all_sw_rules_info
5248 * @hw: pointer to the hardware structure
5250 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5252 struct ice_switch_info *sw = hw->switch_info;
5255 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5256 struct LIST_HEAD_TYPE *rule_head;
5258 rule_head = &sw->recp_list[i].filt_rules;
5259 if (!sw->recp_list[i].adv_rule)
5260 ice_rem_sw_rule_info(hw, rule_head);
5262 ice_rem_adv_rule_info(hw, rule_head);
5263 if (sw->recp_list[i].adv_rule &&
5264 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5265 sw->recp_list[i].adv_rule = false;
5270 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5271 * @pi: pointer to the port_info structure
5272 * @vsi_handle: VSI handle to set as default
5273 * @set: true to add the above mentioned switch rule, false to remove it
5274 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5276 * add filter rule to set/unset given VSI as default VSI for the switch
5277 * (represented by swid)
5280 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5283 struct ice_aqc_sw_rules_elem *s_rule;
5284 struct ice_fltr_info f_info;
5285 struct ice_hw *hw = pi->hw;
5286 enum ice_adminq_opc opcode;
5287 enum ice_status status;
5291 if (!ice_is_vsi_valid(hw, vsi_handle))
5292 return ICE_ERR_PARAM;
5293 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5295 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5296 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5298 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5300 return ICE_ERR_NO_MEMORY;
5302 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5304 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5305 f_info.flag = direction;
5306 f_info.fltr_act = ICE_FWD_TO_VSI;
5307 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5309 if (f_info.flag & ICE_FLTR_RX) {
5310 f_info.src = pi->lport;
5311 f_info.src_id = ICE_SRC_ID_LPORT;
5313 f_info.fltr_rule_id =
5314 pi->dflt_rx_vsi_rule_id;
5315 } else if (f_info.flag & ICE_FLTR_TX) {
5316 f_info.src_id = ICE_SRC_ID_VSI;
5317 f_info.src = hw_vsi_id;
5319 f_info.fltr_rule_id =
5320 pi->dflt_tx_vsi_rule_id;
5324 opcode = ice_aqc_opc_add_sw_rules;
5326 opcode = ice_aqc_opc_remove_sw_rules;
5328 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5330 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5331 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5334 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5336 if (f_info.flag & ICE_FLTR_TX) {
5337 pi->dflt_tx_vsi_num = hw_vsi_id;
5338 pi->dflt_tx_vsi_rule_id = index;
5339 } else if (f_info.flag & ICE_FLTR_RX) {
5340 pi->dflt_rx_vsi_num = hw_vsi_id;
5341 pi->dflt_rx_vsi_rule_id = index;
5344 if (f_info.flag & ICE_FLTR_TX) {
5345 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5346 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5347 } else if (f_info.flag & ICE_FLTR_RX) {
5348 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5349 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5354 ice_free(hw, s_rule);
5359 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5360 * @list_head: head of rule list
5361 * @f_info: rule information
5363 * Helper function to search for a unicast rule entry - this is to be used
5364 * to remove unicast MAC filter that is not shared with other VSIs on the
5367 * Returns pointer to entry storing the rule if found
5369 static struct ice_fltr_mgmt_list_entry *
5370 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5371 struct ice_fltr_info *f_info)
5373 struct ice_fltr_mgmt_list_entry *list_itr;
5375 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5377 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5378 sizeof(f_info->l_data)) &&
5379 f_info->fwd_id.hw_vsi_id ==
5380 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5381 f_info->flag == list_itr->fltr_info.flag)
5388 * ice_remove_mac_rule - remove a MAC based filter rule
5389 * @hw: pointer to the hardware structure
5390 * @m_list: list of MAC addresses and forwarding information
5391 * @recp_list: list from which function remove MAC address
5393 * This function removes either a MAC filter rule or a specific VSI from a
5394 * VSI list for a multicast MAC address.
5396 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5397 * ice_add_mac. Caller should be aware that this call will only work if all
5398 * the entries passed into m_list were added previously. It will not attempt to
5399 * do a partial remove of entries that were found.
5401 static enum ice_status
5402 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5403 struct ice_sw_recipe *recp_list)
5405 struct ice_fltr_list_entry *list_itr, *tmp;
5406 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5409 return ICE_ERR_PARAM;
5411 rule_lock = &recp_list->filt_rule_lock;
5412 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5414 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5415 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5418 if (l_type != ICE_SW_LKUP_MAC)
5419 return ICE_ERR_PARAM;
5421 vsi_handle = list_itr->fltr_info.vsi_handle;
5422 if (!ice_is_vsi_valid(hw, vsi_handle))
5423 return ICE_ERR_PARAM;
5425 list_itr->fltr_info.fwd_id.hw_vsi_id =
5426 ice_get_hw_vsi_num(hw, vsi_handle);
5427 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
5428 /* Don't remove the unicast address that belongs to
5429 * another VSI on the switch, since it is not being
5432 ice_acquire_lock(rule_lock);
5433 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5434 &list_itr->fltr_info)) {
5435 ice_release_lock(rule_lock);
5436 return ICE_ERR_DOES_NOT_EXIST;
5438 ice_release_lock(rule_lock);
5440 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5442 if (list_itr->status)
5443 return list_itr->status;
5449 * ice_remove_mac - remove a MAC address based filter rule
5450 * @hw: pointer to the hardware structure
5451 * @m_list: list of MAC addresses and forwarding information
5454 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5456 struct ice_sw_recipe *recp_list;
5458 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5459 return ice_remove_mac_rule(hw, m_list, recp_list);
5463 * ice_remove_vlan_rule - Remove VLAN based filter rule
5464 * @hw: pointer to the hardware structure
5465 * @v_list: list of VLAN entries and forwarding information
5466 * @recp_list: list from which function remove VLAN
5468 static enum ice_status
5469 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5470 struct ice_sw_recipe *recp_list)
5472 struct ice_fltr_list_entry *v_list_itr, *tmp;
5474 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5476 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5478 if (l_type != ICE_SW_LKUP_VLAN)
5479 return ICE_ERR_PARAM;
5480 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5482 if (v_list_itr->status)
5483 return v_list_itr->status;
5489 * ice_remove_vlan - remove a VLAN address based filter rule
5490 * @hw: pointer to the hardware structure
5491 * @v_list: list of VLAN and forwarding information
5495 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5497 struct ice_sw_recipe *recp_list;
5500 return ICE_ERR_PARAM;
5502 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5503 return ice_remove_vlan_rule(hw, v_list, recp_list);
5507 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5508 * @hw: pointer to the hardware structure
5509 * @v_list: list of MAC VLAN entries and forwarding information
5510 * @recp_list: list from which function remove MAC VLAN
5512 static enum ice_status
5513 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5514 struct ice_sw_recipe *recp_list)
5516 struct ice_fltr_list_entry *v_list_itr, *tmp;
5518 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5519 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5521 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5523 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5524 return ICE_ERR_PARAM;
5525 v_list_itr->status =
5526 ice_remove_rule_internal(hw, recp_list,
5528 if (v_list_itr->status)
5529 return v_list_itr->status;
5535 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5536 * @hw: pointer to the hardware structure
5537 * @mv_list: list of MAC VLAN and forwarding information
5540 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5542 struct ice_sw_recipe *recp_list;
5544 if (!mv_list || !hw)
5545 return ICE_ERR_PARAM;
5547 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5548 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5552 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5553 * @fm_entry: filter entry to inspect
5554 * @vsi_handle: VSI handle to compare with filter info
5557 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5559 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5560 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5561 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5562 fm_entry->vsi_list_info &&
5563 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5568 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5569 * @hw: pointer to the hardware structure
5570 * @vsi_handle: VSI handle to remove filters from
5571 * @vsi_list_head: pointer to the list to add entry to
5572 * @fi: pointer to fltr_info of filter entry to copy & add
5574 * Helper function, used when creating a list of filters to remove from
5575 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5576 * original filter entry, with the exception of fltr_info.fltr_act and
5577 * fltr_info.fwd_id fields. These are set such that later logic can
5578 * extract which VSI to remove the fltr from, and pass on that information.
5580 static enum ice_status
5581 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5582 struct LIST_HEAD_TYPE *vsi_list_head,
5583 struct ice_fltr_info *fi)
5585 struct ice_fltr_list_entry *tmp;
5587 /* this memory is freed up in the caller function
5588 * once filters for this VSI are removed
5590 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5592 return ICE_ERR_NO_MEMORY;
5594 tmp->fltr_info = *fi;
5596 /* Overwrite these fields to indicate which VSI to remove filter from,
5597 * so find and remove logic can extract the information from the
5598 * list entries. Note that original entries will still have proper
5601 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5602 tmp->fltr_info.vsi_handle = vsi_handle;
5603 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5605 LIST_ADD(&tmp->list_entry, vsi_list_head);
5611 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5612 * @hw: pointer to the hardware structure
5613 * @vsi_handle: VSI handle to remove filters from
5614 * @lkup_list_head: pointer to the list that has certain lookup type filters
5615 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5617 * Locates all filters in lkup_list_head that are used by the given VSI,
5618 * and adds COPIES of those entries to vsi_list_head (intended to be used
5619 * to remove the listed filters).
5620 * Note that this means all entries in vsi_list_head must be explicitly
5621 * deallocated by the caller when done with list.
5623 static enum ice_status
5624 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5625 struct LIST_HEAD_TYPE *lkup_list_head,
5626 struct LIST_HEAD_TYPE *vsi_list_head)
5628 struct ice_fltr_mgmt_list_entry *fm_entry;
5629 enum ice_status status = ICE_SUCCESS;
5631 /* check to make sure VSI ID is valid and within boundary */
5632 if (!ice_is_vsi_valid(hw, vsi_handle))
5633 return ICE_ERR_PARAM;
5635 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5636 ice_fltr_mgmt_list_entry, list_entry) {
5637 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5640 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5642 &fm_entry->fltr_info);
5650 * ice_determine_promisc_mask
5651 * @fi: filter info to parse
5653 * Helper function to determine which ICE_PROMISC_ mask corresponds
5654 * to given filter into.
5656 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5658 u16 vid = fi->l_data.mac_vlan.vlan_id;
5659 u8 *macaddr = fi->l_data.mac.mac_addr;
5660 bool is_tx_fltr = false;
5661 u8 promisc_mask = 0;
5663 if (fi->flag == ICE_FLTR_TX)
5666 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5667 promisc_mask |= is_tx_fltr ?
5668 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5669 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5670 promisc_mask |= is_tx_fltr ?
5671 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5672 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5673 promisc_mask |= is_tx_fltr ?
5674 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5676 promisc_mask |= is_tx_fltr ?
5677 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5679 return promisc_mask;
5683 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5684 * @hw: pointer to the hardware structure
5685 * @vsi_handle: VSI handle to retrieve info from
5686 * @promisc_mask: pointer to mask to be filled in
5687 * @vid: VLAN ID of promisc VLAN VSI
5688 * @sw: pointer to switch info struct for which function add rule
5690 static enum ice_status
5691 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5692 u16 *vid, struct ice_switch_info *sw)
5694 struct ice_fltr_mgmt_list_entry *itr;
5695 struct LIST_HEAD_TYPE *rule_head;
5696 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5698 if (!ice_is_vsi_valid(hw, vsi_handle))
5699 return ICE_ERR_PARAM;
5703 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5704 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5706 ice_acquire_lock(rule_lock);
5707 LIST_FOR_EACH_ENTRY(itr, rule_head,
5708 ice_fltr_mgmt_list_entry, list_entry) {
5709 /* Continue if this filter doesn't apply to this VSI or the
5710 * VSI ID is not in the VSI map for this filter
5712 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5715 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5717 ice_release_lock(rule_lock);
5723 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5724 * @hw: pointer to the hardware structure
5725 * @vsi_handle: VSI handle to retrieve info from
5726 * @promisc_mask: pointer to mask to be filled in
5727 * @vid: VLAN ID of promisc VLAN VSI
5730 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5733 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5734 vid, hw->switch_info);
5738 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5739 * @hw: pointer to the hardware structure
5740 * @vsi_handle: VSI handle to retrieve info from
5741 * @promisc_mask: pointer to mask to be filled in
5742 * @vid: VLAN ID of promisc VLAN VSI
5743 * @sw: pointer to switch info struct for which function add rule
5745 static enum ice_status
5746 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5747 u16 *vid, struct ice_switch_info *sw)
5749 struct ice_fltr_mgmt_list_entry *itr;
5750 struct LIST_HEAD_TYPE *rule_head;
5751 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5753 if (!ice_is_vsi_valid(hw, vsi_handle))
5754 return ICE_ERR_PARAM;
5758 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5759 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5761 ice_acquire_lock(rule_lock);
5762 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5764 /* Continue if this filter doesn't apply to this VSI or the
5765 * VSI ID is not in the VSI map for this filter
5767 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5770 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5772 ice_release_lock(rule_lock);
5778 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5779 * @hw: pointer to the hardware structure
5780 * @vsi_handle: VSI handle to retrieve info from
5781 * @promisc_mask: pointer to mask to be filled in
5782 * @vid: VLAN ID of promisc VLAN VSI
5785 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5788 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5789 vid, hw->switch_info);
5793 * ice_remove_promisc - Remove promisc based filter rules
5794 * @hw: pointer to the hardware structure
5795 * @recp_id: recipe ID for which the rule needs to removed
5796 * @v_list: list of promisc entries
5798 static enum ice_status
5799 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5800 struct LIST_HEAD_TYPE *v_list)
5802 struct ice_fltr_list_entry *v_list_itr, *tmp;
5803 struct ice_sw_recipe *recp_list;
5805 recp_list = &hw->switch_info->recp_list[recp_id];
5806 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5808 v_list_itr->status =
5809 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5810 if (v_list_itr->status)
5811 return v_list_itr->status;
5817 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5818 * @hw: pointer to the hardware structure
5819 * @vsi_handle: VSI handle to clear mode
5820 * @promisc_mask: mask of promiscuous config bits to clear
5821 * @vid: VLAN ID to clear VLAN promiscuous
5822 * @sw: pointer to switch info struct for which function add rule
5824 static enum ice_status
5825 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5826 u16 vid, struct ice_switch_info *sw)
5828 struct ice_fltr_list_entry *fm_entry, *tmp;
5829 struct LIST_HEAD_TYPE remove_list_head;
5830 struct ice_fltr_mgmt_list_entry *itr;
5831 struct LIST_HEAD_TYPE *rule_head;
5832 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5833 enum ice_status status = ICE_SUCCESS;
5836 if (!ice_is_vsi_valid(hw, vsi_handle))
5837 return ICE_ERR_PARAM;
5839 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5840 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5842 recipe_id = ICE_SW_LKUP_PROMISC;
5844 rule_head = &sw->recp_list[recipe_id].filt_rules;
5845 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5847 INIT_LIST_HEAD(&remove_list_head);
5849 ice_acquire_lock(rule_lock);
5850 LIST_FOR_EACH_ENTRY(itr, rule_head,
5851 ice_fltr_mgmt_list_entry, list_entry) {
5852 struct ice_fltr_info *fltr_info;
5853 u8 fltr_promisc_mask = 0;
5855 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5857 fltr_info = &itr->fltr_info;
5859 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5860 vid != fltr_info->l_data.mac_vlan.vlan_id)
5863 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5865 /* Skip if filter is not completely specified by given mask */
5866 if (fltr_promisc_mask & ~promisc_mask)
5869 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5873 ice_release_lock(rule_lock);
5874 goto free_fltr_list;
5877 ice_release_lock(rule_lock);
5879 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5882 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5883 ice_fltr_list_entry, list_entry) {
5884 LIST_DEL(&fm_entry->list_entry);
5885 ice_free(hw, fm_entry);
5892 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5893 * @hw: pointer to the hardware structure
5894 * @vsi_handle: VSI handle to clear mode
5895 * @promisc_mask: mask of promiscuous config bits to clear
5896 * @vid: VLAN ID to clear VLAN promiscuous
5899 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5900 u8 promisc_mask, u16 vid)
5902 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5903 vid, hw->switch_info);
5907 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5908 * @hw: pointer to the hardware structure
5909 * @vsi_handle: VSI handle to configure
5910 * @promisc_mask: mask of promiscuous config bits
5911 * @vid: VLAN ID to set VLAN promiscuous
5912 * @lport: logical port number to configure promisc mode
5913 * @sw: pointer to switch info struct for which function add rule
5915 static enum ice_status
5916 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5917 u16 vid, u8 lport, struct ice_switch_info *sw)
5919 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5920 struct ice_fltr_list_entry f_list_entry;
5921 struct ice_fltr_info new_fltr;
5922 enum ice_status status = ICE_SUCCESS;
5928 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5930 if (!ice_is_vsi_valid(hw, vsi_handle))
5931 return ICE_ERR_PARAM;
5932 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5934 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5936 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5937 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5938 new_fltr.l_data.mac_vlan.vlan_id = vid;
5939 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5941 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5942 recipe_id = ICE_SW_LKUP_PROMISC;
5945 /* Separate filters must be set for each direction/packet type
5946 * combination, so we will loop over the mask value, store the
5947 * individual type, and clear it out in the input mask as it
5950 while (promisc_mask) {
5951 struct ice_sw_recipe *recp_list;
5957 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5958 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5959 pkt_type = UCAST_FLTR;
5960 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5961 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5962 pkt_type = UCAST_FLTR;
5964 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5965 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5966 pkt_type = MCAST_FLTR;
5967 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5968 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5969 pkt_type = MCAST_FLTR;
5971 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5972 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5973 pkt_type = BCAST_FLTR;
5974 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5975 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5976 pkt_type = BCAST_FLTR;
5980 /* Check for VLAN promiscuous flag */
5981 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5982 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5983 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5984 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5988 /* Set filter DA based on packet type */
5989 mac_addr = new_fltr.l_data.mac.mac_addr;
5990 if (pkt_type == BCAST_FLTR) {
5991 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5992 } else if (pkt_type == MCAST_FLTR ||
5993 pkt_type == UCAST_FLTR) {
5994 /* Use the dummy ether header DA */
5995 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5996 ICE_NONDMA_TO_NONDMA);
5997 if (pkt_type == MCAST_FLTR)
5998 mac_addr[0] |= 0x1; /* Set multicast bit */
6001 /* Need to reset this to zero for all iterations */
6004 new_fltr.flag |= ICE_FLTR_TX;
6005 new_fltr.src = hw_vsi_id;
6007 new_fltr.flag |= ICE_FLTR_RX;
6008 new_fltr.src = lport;
6011 new_fltr.fltr_act = ICE_FWD_TO_VSI;
6012 new_fltr.vsi_handle = vsi_handle;
6013 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
6014 f_list_entry.fltr_info = new_fltr;
6015 recp_list = &sw->recp_list[recipe_id];
6017 status = ice_add_rule_internal(hw, recp_list, lport,
6019 if (status != ICE_SUCCESS)
6020 goto set_promisc_exit;
6028 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6029 * @hw: pointer to the hardware structure
6030 * @vsi_handle: VSI handle to configure
6031 * @promisc_mask: mask of promiscuous config bits
6032 * @vid: VLAN ID to set VLAN promiscuous
6035 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6038 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6039 hw->port_info->lport,
6044 * _ice_set_vlan_vsi_promisc
6045 * @hw: pointer to the hardware structure
6046 * @vsi_handle: VSI handle to configure
6047 * @promisc_mask: mask of promiscuous config bits
6048 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6049 * @lport: logical port number to configure promisc mode
6050 * @sw: pointer to switch info struct for which function add rule
6052 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6054 static enum ice_status
6055 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6056 bool rm_vlan_promisc, u8 lport,
6057 struct ice_switch_info *sw)
6059 struct ice_fltr_list_entry *list_itr, *tmp;
6060 struct LIST_HEAD_TYPE vsi_list_head;
6061 struct LIST_HEAD_TYPE *vlan_head;
6062 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6063 enum ice_status status;
6066 INIT_LIST_HEAD(&vsi_list_head);
6067 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6068 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6069 ice_acquire_lock(vlan_lock);
6070 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6072 ice_release_lock(vlan_lock);
6074 goto free_fltr_list;
6076 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6078 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6079 if (rm_vlan_promisc)
6080 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6084 status = _ice_set_vsi_promisc(hw, vsi_handle,
6085 promisc_mask, vlan_id,
6092 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6093 ice_fltr_list_entry, list_entry) {
6094 LIST_DEL(&list_itr->list_entry);
6095 ice_free(hw, list_itr);
6101 * ice_set_vlan_vsi_promisc
6102 * @hw: pointer to the hardware structure
6103 * @vsi_handle: VSI handle to configure
6104 * @promisc_mask: mask of promiscuous config bits
6105 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6107 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6110 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6111 bool rm_vlan_promisc)
6113 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6114 rm_vlan_promisc, hw->port_info->lport,
6119 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6120 * @hw: pointer to the hardware structure
6121 * @vsi_handle: VSI handle to remove filters from
6122 * @recp_list: recipe list from which function remove fltr
6123 * @lkup: switch rule filter lookup type
6126 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6127 struct ice_sw_recipe *recp_list,
6128 enum ice_sw_lkup_type lkup)
6130 struct ice_fltr_list_entry *fm_entry;
6131 struct LIST_HEAD_TYPE remove_list_head;
6132 struct LIST_HEAD_TYPE *rule_head;
6133 struct ice_fltr_list_entry *tmp;
6134 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6135 enum ice_status status;
6137 INIT_LIST_HEAD(&remove_list_head);
6138 rule_lock = &recp_list[lkup].filt_rule_lock;
6139 rule_head = &recp_list[lkup].filt_rules;
6140 ice_acquire_lock(rule_lock);
6141 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6143 ice_release_lock(rule_lock);
6145 goto free_fltr_list;
6148 case ICE_SW_LKUP_MAC:
6149 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6151 case ICE_SW_LKUP_VLAN:
6152 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6154 case ICE_SW_LKUP_PROMISC:
6155 case ICE_SW_LKUP_PROMISC_VLAN:
6156 ice_remove_promisc(hw, lkup, &remove_list_head);
6158 case ICE_SW_LKUP_MAC_VLAN:
6159 ice_remove_mac_vlan(hw, &remove_list_head);
6161 case ICE_SW_LKUP_ETHERTYPE:
6162 case ICE_SW_LKUP_ETHERTYPE_MAC:
6163 ice_remove_eth_mac(hw, &remove_list_head);
6165 case ICE_SW_LKUP_DFLT:
6166 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6168 case ICE_SW_LKUP_LAST:
6169 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6174 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6175 ice_fltr_list_entry, list_entry) {
6176 LIST_DEL(&fm_entry->list_entry);
6177 ice_free(hw, fm_entry);
6182 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6183 * @hw: pointer to the hardware structure
6184 * @vsi_handle: VSI handle to remove filters from
6185 * @sw: pointer to switch info struct
6188 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6189 struct ice_switch_info *sw)
6191 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6193 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6194 sw->recp_list, ICE_SW_LKUP_MAC);
6195 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6196 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6197 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6198 sw->recp_list, ICE_SW_LKUP_PROMISC);
6199 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6200 sw->recp_list, ICE_SW_LKUP_VLAN);
6201 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6202 sw->recp_list, ICE_SW_LKUP_DFLT);
6203 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6204 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6205 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6206 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6207 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6208 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6212 * ice_remove_vsi_fltr - Remove all filters for a VSI
6213 * @hw: pointer to the hardware structure
6214 * @vsi_handle: VSI handle to remove filters from
6216 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6218 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6222 * ice_alloc_res_cntr - allocating resource counter
6223 * @hw: pointer to the hardware structure
6224 * @type: type of resource
6225 * @alloc_shared: if set it is shared else dedicated
6226 * @num_items: number of entries requested for FD resource type
6227 * @counter_id: counter index returned by AQ call
6230 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6233 struct ice_aqc_alloc_free_res_elem *buf;
6234 enum ice_status status;
6237 /* Allocate resource */
6238 buf_len = ice_struct_size(buf, elem, 1);
6239 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6241 return ICE_ERR_NO_MEMORY;
6243 buf->num_elems = CPU_TO_LE16(num_items);
6244 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6245 ICE_AQC_RES_TYPE_M) | alloc_shared);
6247 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6248 ice_aqc_opc_alloc_res, NULL);
6252 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6260 * ice_free_res_cntr - free resource counter
6261 * @hw: pointer to the hardware structure
6262 * @type: type of resource
6263 * @alloc_shared: if set it is shared else dedicated
6264 * @num_items: number of entries to be freed for FD resource type
6265 * @counter_id: counter ID resource which needs to be freed
6268 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6271 struct ice_aqc_alloc_free_res_elem *buf;
6272 enum ice_status status;
6276 buf_len = ice_struct_size(buf, elem, 1);
6277 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6279 return ICE_ERR_NO_MEMORY;
6281 buf->num_elems = CPU_TO_LE16(num_items);
6282 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6283 ICE_AQC_RES_TYPE_M) | alloc_shared);
6284 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6286 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6287 ice_aqc_opc_free_res, NULL);
6289 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6296 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6297 * @hw: pointer to the hardware structure
6298 * @counter_id: returns counter index
6300 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6302 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6303 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6308 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6309 * @hw: pointer to the hardware structure
6310 * @counter_id: counter index to be freed
6312 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6314 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6315 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6320 * ice_alloc_res_lg_act - add large action resource
6321 * @hw: pointer to the hardware structure
6322 * @l_id: large action ID to fill it in
6323 * @num_acts: number of actions to hold with a large action entry
6325 static enum ice_status
6326 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6328 struct ice_aqc_alloc_free_res_elem *sw_buf;
6329 enum ice_status status;
6332 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6333 return ICE_ERR_PARAM;
6335 /* Allocate resource for large action */
6336 buf_len = ice_struct_size(sw_buf, elem, 1);
6337 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6339 return ICE_ERR_NO_MEMORY;
6341 sw_buf->num_elems = CPU_TO_LE16(1);
6343 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6344 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6345 * If num_acts is greater than 2, then use
6346 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6347 * The num_acts cannot exceed 4. This was ensured at the
6348 * beginning of the function.
6351 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6352 else if (num_acts == 2)
6353 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6355 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6357 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6358 ice_aqc_opc_alloc_res, NULL);
6360 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6362 ice_free(hw, sw_buf);
6367 * ice_add_mac_with_sw_marker - add filter with sw marker
6368 * @hw: pointer to the hardware structure
6369 * @f_info: filter info structure containing the MAC filter information
6370 * @sw_marker: sw marker to tag the Rx descriptor with
6373 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6376 struct ice_fltr_mgmt_list_entry *m_entry;
6377 struct ice_fltr_list_entry fl_info;
6378 struct ice_sw_recipe *recp_list;
6379 struct LIST_HEAD_TYPE l_head;
6380 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6381 enum ice_status ret;
6385 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6386 return ICE_ERR_PARAM;
6388 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6389 return ICE_ERR_PARAM;
6391 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6392 return ICE_ERR_PARAM;
6394 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6395 return ICE_ERR_PARAM;
6396 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6398 /* Add filter if it doesn't exist so then the adding of large
6399 * action always results in update
6402 INIT_LIST_HEAD(&l_head);
6403 fl_info.fltr_info = *f_info;
6404 LIST_ADD(&fl_info.list_entry, &l_head);
6406 entry_exists = false;
6407 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6408 hw->port_info->lport);
6409 if (ret == ICE_ERR_ALREADY_EXISTS)
6410 entry_exists = true;
6414 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6415 rule_lock = &recp_list->filt_rule_lock;
6416 ice_acquire_lock(rule_lock);
6417 /* Get the book keeping entry for the filter */
6418 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6422 /* If counter action was enabled for this rule then don't enable
6423 * sw marker large action
6425 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6426 ret = ICE_ERR_PARAM;
6430 /* if same marker was added before */
6431 if (m_entry->sw_marker_id == sw_marker) {
6432 ret = ICE_ERR_ALREADY_EXISTS;
6436 /* Allocate a hardware table entry to hold large act. Three actions
6437 * for marker based large action
6439 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6443 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6446 /* Update the switch rule to add the marker action */
6447 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6449 ice_release_lock(rule_lock);
6454 ice_release_lock(rule_lock);
6455 /* only remove entry if it did not exist previously */
6457 ret = ice_remove_mac(hw, &l_head);
6463 * ice_add_mac_with_counter - add filter with counter enabled
6464 * @hw: pointer to the hardware structure
6465 * @f_info: pointer to filter info structure containing the MAC filter
6469 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6471 struct ice_fltr_mgmt_list_entry *m_entry;
6472 struct ice_fltr_list_entry fl_info;
6473 struct ice_sw_recipe *recp_list;
6474 struct LIST_HEAD_TYPE l_head;
6475 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6476 enum ice_status ret;
6481 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6482 return ICE_ERR_PARAM;
6484 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6485 return ICE_ERR_PARAM;
6487 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6488 return ICE_ERR_PARAM;
6489 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6490 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6492 entry_exist = false;
6494 rule_lock = &recp_list->filt_rule_lock;
6496 /* Add filter if it doesn't exist so then the adding of large
6497 * action always results in update
6499 INIT_LIST_HEAD(&l_head);
6501 fl_info.fltr_info = *f_info;
6502 LIST_ADD(&fl_info.list_entry, &l_head);
6504 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6505 hw->port_info->lport);
6506 if (ret == ICE_ERR_ALREADY_EXISTS)
6511 ice_acquire_lock(rule_lock);
6512 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6514 ret = ICE_ERR_BAD_PTR;
6518 /* Don't enable counter for a filter for which sw marker was enabled */
6519 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6520 ret = ICE_ERR_PARAM;
6524 /* If a counter was already enabled then don't need to add again */
6525 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6526 ret = ICE_ERR_ALREADY_EXISTS;
6530 /* Allocate a hardware table entry to VLAN counter */
6531 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6535 /* Allocate a hardware table entry to hold large act. Two actions for
6536 * counter based large action
6538 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6542 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6545 /* Update the switch rule to add the counter action */
6546 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6548 ice_release_lock(rule_lock);
6553 ice_release_lock(rule_lock);
6554 /* only remove entry if it did not exist previously */
6556 ret = ice_remove_mac(hw, &l_head);
6561 /* This is mapping table entry that maps every word within a given protocol
6562 * structure to the real byte offset as per the specification of that
6564 * for example dst address is 3 words in ethertype header and corresponding
6565 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6566 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6567 * matching entry describing its field. This needs to be updated if new
6568 * structure is added to that union.
6570 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6571 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6572 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6573 { ICE_ETYPE_OL, { 0 } },
6574 { ICE_VLAN_OFOS, { 2, 0 } },
6575 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6576 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6577 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6578 26, 28, 30, 32, 34, 36, 38 } },
6579 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6580 26, 28, 30, 32, 34, 36, 38 } },
6581 { ICE_TCP_IL, { 0, 2 } },
6582 { ICE_UDP_OF, { 0, 2 } },
6583 { ICE_UDP_ILOS, { 0, 2 } },
6584 { ICE_SCTP_IL, { 0, 2 } },
6585 { ICE_VXLAN, { 8, 10, 12, 14 } },
6586 { ICE_GENEVE, { 8, 10, 12, 14 } },
6587 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6588 { ICE_NVGRE, { 0, 2, 4, 6 } },
6589 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6590 { ICE_PPPOE, { 0, 2, 4, 6 } },
6591 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6592 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6593 { ICE_ESP, { 0, 2, 4, 6 } },
6594 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6595 { ICE_NAT_T, { 8, 10, 12, 14 } },
6596 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6597 { ICE_VLAN_EX, { 2, 0 } },
6598 { ICE_VLAN_IN, { 2, 0 } },
6601 /* The following table describes preferred grouping of recipes.
6602 * If a recipe that needs to be programmed is a superset or matches one of the
6603 * following combinations, then the recipe needs to be chained as per the
6607 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6608 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6609 { ICE_MAC_IL, ICE_MAC_IL_HW },
6610 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6611 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6612 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6613 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6614 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6615 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6616 { ICE_TCP_IL, ICE_TCP_IL_HW },
6617 { ICE_UDP_OF, ICE_UDP_OF_HW },
6618 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6619 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6620 { ICE_VXLAN, ICE_UDP_OF_HW },
6621 { ICE_GENEVE, ICE_UDP_OF_HW },
6622 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6623 { ICE_NVGRE, ICE_GRE_OF_HW },
6624 { ICE_GTP, ICE_UDP_OF_HW },
6625 { ICE_PPPOE, ICE_PPPOE_HW },
6626 { ICE_PFCP, ICE_UDP_ILOS_HW },
6627 { ICE_L2TPV3, ICE_L2TPV3_HW },
6628 { ICE_ESP, ICE_ESP_HW },
6629 { ICE_AH, ICE_AH_HW },
6630 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6631 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6632 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6633 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
6637 * ice_find_recp - find a recipe
6638 * @hw: pointer to the hardware structure
6639 * @lkup_exts: extension sequence to match
6641 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6643 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6644 enum ice_sw_tunnel_type tun_type, u32 priority)
6646 bool refresh_required = true;
6647 struct ice_sw_recipe *recp;
6650 /* Walk through existing recipes to find a match */
6651 recp = hw->switch_info->recp_list;
6652 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6653 /* If recipe was not created for this ID, in SW bookkeeping,
6654 * check if FW has an entry for this recipe. If the FW has an
6655 * entry update it in our SW bookkeeping and continue with the
6658 if (!recp[i].recp_created)
6659 if (ice_get_recp_frm_fw(hw,
6660 hw->switch_info->recp_list, i,
6664 /* Skip inverse action recipes */
6665 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6666 ICE_AQ_RECIPE_ACT_INV_ACT)
6669 /* if number of words we are looking for match */
6670 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6671 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6672 struct ice_fv_word *be = lkup_exts->fv_words;
6673 u16 *cr = recp[i].lkup_exts.field_mask;
6674 u16 *de = lkup_exts->field_mask;
6678 /* ar, cr, and qr are related to the recipe words, while
6679 * be, de, and pe are related to the lookup words
6681 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6682 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6684 if (ar[qr].off == be[pe].off &&
6685 ar[qr].prot_id == be[pe].prot_id &&
6687 /* Found the "pe"th word in the
6692 /* After walking through all the words in the
6693 * "i"th recipe if "p"th word was not found then
6694 * this recipe is not what we are looking for.
6695 * So break out from this loop and try the next
6698 if (qr >= recp[i].lkup_exts.n_val_words) {
6703 /* If for "i"th recipe the found was never set to false
6704 * then it means we found our match
6706 if (tun_type == recp[i].tun_type && found &&
6707 priority == recp[i].priority)
6708 return i; /* Return the recipe ID */
6711 return ICE_MAX_NUM_RECIPES;
6715 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6717 * As protocol id for outer vlan is different in dvm and svm, if dvm is
6718 * supported protocol array record for outer vlan has to be modified to
6719 * reflect the value proper for DVM.
6721 void ice_change_proto_id_to_dvm(void)
6725 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6726 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6727 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6728 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6732 * ice_prot_type_to_id - get protocol ID from protocol type
6733 * @type: protocol type
6734 * @id: pointer to variable that will receive the ID
6736 * Returns true if found, false otherwise
6738 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6742 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6743 if (ice_prot_id_tbl[i].type == type) {
6744 *id = ice_prot_id_tbl[i].protocol_id;
6751 * ice_fill_valid_words - count valid words
6752 * @rule: advanced rule with lookup information
6753 * @lkup_exts: byte offset extractions of the words that are valid
6755 * calculate valid words in a lookup rule using mask value
6758 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6759 struct ice_prot_lkup_ext *lkup_exts)
6761 u8 j, word, prot_id, ret_val;
6763 if (!ice_prot_type_to_id(rule->type, &prot_id))
6766 word = lkup_exts->n_val_words;
6768 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6769 if (((u16 *)&rule->m_u)[j] &&
6770 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6771 /* No more space to accommodate */
6772 if (word >= ICE_MAX_CHAIN_WORDS)
6774 lkup_exts->fv_words[word].off =
6775 ice_prot_ext[rule->type].offs[j];
6776 lkup_exts->fv_words[word].prot_id =
6777 ice_prot_id_tbl[rule->type].protocol_id;
6778 lkup_exts->field_mask[word] =
6779 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6783 ret_val = word - lkup_exts->n_val_words;
6784 lkup_exts->n_val_words = word;
6790 * ice_create_first_fit_recp_def - Create a recipe grouping
6791 * @hw: pointer to the hardware structure
6792 * @lkup_exts: an array of protocol header extractions
6793 * @rg_list: pointer to a list that stores new recipe groups
6794 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6796 * Using first fit algorithm, take all the words that are still not done
6797 * and start grouping them in 4-word groups. Each group makes up one
6800 static enum ice_status
6801 ice_create_first_fit_recp_def(struct ice_hw *hw,
6802 struct ice_prot_lkup_ext *lkup_exts,
6803 struct LIST_HEAD_TYPE *rg_list,
6806 struct ice_pref_recipe_group *grp = NULL;
6811 if (!lkup_exts->n_val_words) {
6812 struct ice_recp_grp_entry *entry;
6814 entry = (struct ice_recp_grp_entry *)
6815 ice_malloc(hw, sizeof(*entry));
6817 return ICE_ERR_NO_MEMORY;
6818 LIST_ADD(&entry->l_entry, rg_list);
6819 grp = &entry->r_group;
6821 grp->n_val_pairs = 0;
6824 /* Walk through every word in the rule to check if it is not done. If so
6825 * then this word needs to be part of a new recipe.
6827 for (j = 0; j < lkup_exts->n_val_words; j++)
6828 if (!ice_is_bit_set(lkup_exts->done, j)) {
6830 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6831 struct ice_recp_grp_entry *entry;
6833 entry = (struct ice_recp_grp_entry *)
6834 ice_malloc(hw, sizeof(*entry));
6836 return ICE_ERR_NO_MEMORY;
6837 LIST_ADD(&entry->l_entry, rg_list);
6838 grp = &entry->r_group;
6842 grp->pairs[grp->n_val_pairs].prot_id =
6843 lkup_exts->fv_words[j].prot_id;
6844 grp->pairs[grp->n_val_pairs].off =
6845 lkup_exts->fv_words[j].off;
6846 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6854 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6855 * @hw: pointer to the hardware structure
6856 * @fv_list: field vector with the extraction sequence information
6857 * @rg_list: recipe groupings with protocol-offset pairs
6859 * Helper function to fill in the field vector indices for protocol-offset
6860 * pairs. These indexes are then ultimately programmed into a recipe.
6862 static enum ice_status
6863 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6864 struct LIST_HEAD_TYPE *rg_list)
6866 struct ice_sw_fv_list_entry *fv;
6867 struct ice_recp_grp_entry *rg;
6868 struct ice_fv_word *fv_ext;
6870 if (LIST_EMPTY(fv_list))
6873 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6874 fv_ext = fv->fv_ptr->ew;
6876 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6879 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6880 struct ice_fv_word *pr;
6885 pr = &rg->r_group.pairs[i];
6886 mask = rg->r_group.mask[i];
6888 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6889 if (fv_ext[j].prot_id == pr->prot_id &&
6890 fv_ext[j].off == pr->off) {
6893 /* Store index of field vector */
6895 rg->fv_mask[i] = mask;
6899 /* Protocol/offset could not be found, caller gave an
6903 return ICE_ERR_PARAM;
6911 * ice_find_free_recp_res_idx - find free result indexes for recipe
6912 * @hw: pointer to hardware structure
6913 * @profiles: bitmap of profiles that will be associated with the new recipe
6914 * @free_idx: pointer to variable to receive the free index bitmap
6916 * The algorithm used here is:
6917 * 1. When creating a new recipe, create a set P which contains all
6918 * Profiles that will be associated with our new recipe
6920 * 2. For each Profile p in set P:
6921 * a. Add all recipes associated with Profile p into set R
6922 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6923 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6924 * i. Or just assume they all have the same possible indexes:
6926 * i.e., PossibleIndexes = 0x0000F00000000000
6928 * 3. For each Recipe r in set R:
6929 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6930 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6932 * FreeIndexes will contain the bits indicating the indexes free for use,
6933 * then the code needs to update the recipe[r].used_result_idx_bits to
6934 * indicate which indexes were selected for use by this recipe.
6937 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6938 ice_bitmap_t *free_idx)
6940 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6941 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6942 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6945 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6946 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6947 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6948 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6950 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6952 /* For each profile we are going to associate the recipe with, add the
6953 * recipes that are associated with that profile. This will give us
6954 * the set of recipes that our recipe may collide with. Also, determine
6955 * what possible result indexes are usable given this set of profiles.
6957 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6958 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6959 ICE_MAX_NUM_RECIPES);
6960 ice_and_bitmap(possible_idx, possible_idx,
6961 hw->switch_info->prof_res_bm[bit],
6965 /* For each recipe that our new recipe may collide with, determine
6966 * which indexes have been used.
6968 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6969 ice_or_bitmap(used_idx, used_idx,
6970 hw->switch_info->recp_list[bit].res_idxs,
6973 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6975 /* return number of free indexes */
6976 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6980 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6981 * @hw: pointer to hardware structure
6982 * @rm: recipe management list entry
6983 * @profiles: bitmap of profiles that will be associated.
6985 static enum ice_status
6986 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6987 ice_bitmap_t *profiles)
6989 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6990 struct ice_aqc_recipe_data_elem *tmp;
6991 struct ice_aqc_recipe_data_elem *buf;
6992 struct ice_recp_grp_entry *entry;
6993 enum ice_status status;
6999 /* When more than one recipe are required, another recipe is needed to
7000 * chain them together. Matching a tunnel metadata ID takes up one of
7001 * the match fields in the chaining recipe reducing the number of
7002 * chained recipes by one.
7004 /* check number of free result indices */
7005 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7006 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
7008 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
7009 free_res_idx, rm->n_grp_count);
7011 if (rm->n_grp_count > 1) {
7012 if (rm->n_grp_count > free_res_idx)
7013 return ICE_ERR_MAX_LIMIT;
7018 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7019 return ICE_ERR_MAX_LIMIT;
7021 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7022 ICE_MAX_NUM_RECIPES,
7025 return ICE_ERR_NO_MEMORY;
7027 buf = (struct ice_aqc_recipe_data_elem *)
7028 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7030 status = ICE_ERR_NO_MEMORY;
7034 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7035 recipe_count = ICE_MAX_NUM_RECIPES;
7036 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7038 if (status || recipe_count == 0)
7041 /* Allocate the recipe resources, and configure them according to the
7042 * match fields from protocol headers and extracted field vectors.
7044 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
7045 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7048 status = ice_alloc_recipe(hw, &entry->rid);
7052 /* Clear the result index of the located recipe, as this will be
7053 * updated, if needed, later in the recipe creation process.
7055 tmp[0].content.result_indx = 0;
7057 buf[recps] = tmp[0];
7058 buf[recps].recipe_indx = (u8)entry->rid;
7059 /* if the recipe is a non-root recipe RID should be programmed
7060 * as 0 for the rules to be applied correctly.
7062 buf[recps].content.rid = 0;
7063 ice_memset(&buf[recps].content.lkup_indx, 0,
7064 sizeof(buf[recps].content.lkup_indx),
7067 /* All recipes use look-up index 0 to match switch ID. */
7068 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7069 buf[recps].content.mask[0] =
7070 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7071 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7074 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7075 buf[recps].content.lkup_indx[i] = 0x80;
7076 buf[recps].content.mask[i] = 0;
7079 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7080 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7081 buf[recps].content.mask[i + 1] =
7082 CPU_TO_LE16(entry->fv_mask[i]);
7085 if (rm->n_grp_count > 1) {
7086 /* Checks to see if there really is a valid result index
7089 if (chain_idx >= ICE_MAX_FV_WORDS) {
7090 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7091 status = ICE_ERR_MAX_LIMIT;
7095 entry->chain_idx = chain_idx;
7096 buf[recps].content.result_indx =
7097 ICE_AQ_RECIPE_RESULT_EN |
7098 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7099 ICE_AQ_RECIPE_RESULT_DATA_M);
7100 ice_clear_bit(chain_idx, result_idx_bm);
7101 chain_idx = ice_find_first_bit(result_idx_bm,
7105 /* fill recipe dependencies */
7106 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7107 ICE_MAX_NUM_RECIPES);
7108 ice_set_bit(buf[recps].recipe_indx,
7109 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7110 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7114 if (rm->n_grp_count == 1) {
7115 rm->root_rid = buf[0].recipe_indx;
7116 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7117 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7118 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7119 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7120 sizeof(buf[0].recipe_bitmap),
7121 ICE_NONDMA_TO_NONDMA);
7123 status = ICE_ERR_BAD_PTR;
7126 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7127 * the recipe which is getting created if specified
7128 * by user. Usually any advanced switch filter, which results
7129 * into new extraction sequence, ended up creating a new recipe
7130 * of type ROOT and usually recipes are associated with profiles
7131 * Switch rule referreing newly created recipe, needs to have
7132 * either/or 'fwd' or 'join' priority, otherwise switch rule
7133 * evaluation will not happen correctly. In other words, if
7134 * switch rule to be evaluated on priority basis, then recipe
7135 * needs to have priority, otherwise it will be evaluated last.
7137 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7139 struct ice_recp_grp_entry *last_chain_entry;
7142 /* Allocate the last recipe that will chain the outcomes of the
7143 * other recipes together
7145 status = ice_alloc_recipe(hw, &rid);
7149 buf[recps].recipe_indx = (u8)rid;
7150 buf[recps].content.rid = (u8)rid;
7151 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7152 /* the new entry created should also be part of rg_list to
7153 * make sure we have complete recipe
7155 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7156 sizeof(*last_chain_entry));
7157 if (!last_chain_entry) {
7158 status = ICE_ERR_NO_MEMORY;
7161 last_chain_entry->rid = rid;
7162 ice_memset(&buf[recps].content.lkup_indx, 0,
7163 sizeof(buf[recps].content.lkup_indx),
7165 /* All recipes use look-up index 0 to match switch ID. */
7166 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7167 buf[recps].content.mask[0] =
7168 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7169 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7170 buf[recps].content.lkup_indx[i] =
7171 ICE_AQ_RECIPE_LKUP_IGNORE;
7172 buf[recps].content.mask[i] = 0;
7176 /* update r_bitmap with the recp that is used for chaining */
7177 ice_set_bit(rid, rm->r_bitmap);
7178 /* this is the recipe that chains all the other recipes so it
7179 * should not have a chaining ID to indicate the same
7181 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7182 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7184 last_chain_entry->fv_idx[i] = entry->chain_idx;
7185 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7186 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7187 ice_set_bit(entry->rid, rm->r_bitmap);
7189 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7190 if (sizeof(buf[recps].recipe_bitmap) >=
7191 sizeof(rm->r_bitmap)) {
7192 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7193 sizeof(buf[recps].recipe_bitmap),
7194 ICE_NONDMA_TO_NONDMA);
7196 status = ICE_ERR_BAD_PTR;
7199 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7202 rm->root_rid = (u8)rid;
7204 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7208 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7209 ice_release_change_lock(hw);
7213 /* Every recipe that just got created add it to the recipe
7216 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7217 struct ice_switch_info *sw = hw->switch_info;
7218 bool is_root, idx_found = false;
7219 struct ice_sw_recipe *recp;
7220 u16 idx, buf_idx = 0;
7222 /* find buffer index for copying some data */
7223 for (idx = 0; idx < rm->n_grp_count; idx++)
7224 if (buf[idx].recipe_indx == entry->rid) {
7230 status = ICE_ERR_OUT_OF_RANGE;
7234 recp = &sw->recp_list[entry->rid];
7235 is_root = (rm->root_rid == entry->rid);
7236 recp->is_root = is_root;
7238 recp->root_rid = entry->rid;
7239 recp->big_recp = (is_root && rm->n_grp_count > 1);
7241 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7242 entry->r_group.n_val_pairs *
7243 sizeof(struct ice_fv_word),
7244 ICE_NONDMA_TO_NONDMA);
7246 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7247 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7249 /* Copy non-result fv index values and masks to recipe. This
7250 * call will also update the result recipe bitmask.
7252 ice_collect_result_idx(&buf[buf_idx], recp);
7254 /* for non-root recipes, also copy to the root, this allows
7255 * easier matching of a complete chained recipe
7258 ice_collect_result_idx(&buf[buf_idx],
7259 &sw->recp_list[rm->root_rid]);
7261 recp->n_ext_words = entry->r_group.n_val_pairs;
7262 recp->chain_idx = entry->chain_idx;
7263 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7264 recp->n_grp_count = rm->n_grp_count;
7265 recp->tun_type = rm->tun_type;
7266 recp->recp_created = true;
7280 * ice_create_recipe_group - creates recipe group
7281 * @hw: pointer to hardware structure
7282 * @rm: recipe management list entry
7283 * @lkup_exts: lookup elements
7285 static enum ice_status
7286 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7287 struct ice_prot_lkup_ext *lkup_exts)
7289 enum ice_status status;
7292 rm->n_grp_count = 0;
7294 /* Create recipes for words that are marked not done by packing them
7297 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7298 &rm->rg_list, &recp_count);
7300 rm->n_grp_count += recp_count;
7301 rm->n_ext_words = lkup_exts->n_val_words;
7302 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7303 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7304 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7305 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7312 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7313 * @hw: pointer to hardware structure
7314 * @lkups: lookup elements or match criteria for the advanced recipe, one
7315 * structure per protocol header
7316 * @lkups_cnt: number of protocols
7317 * @bm: bitmap of field vectors to consider
7318 * @fv_list: pointer to a list that holds the returned field vectors
7320 static enum ice_status
7321 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7322 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7324 enum ice_status status;
7331 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7333 return ICE_ERR_NO_MEMORY;
7335 for (i = 0; i < lkups_cnt; i++)
7336 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7337 status = ICE_ERR_CFG;
7341 /* Find field vectors that include all specified protocol types */
7342 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7345 ice_free(hw, prot_ids);
7350 * ice_tun_type_match_word - determine if tun type needs a match mask
7351 * @tun_type: tunnel type
7352 * @mask: mask to be used for the tunnel
7354 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7357 case ICE_SW_TUN_VXLAN_GPE:
7358 case ICE_SW_TUN_GENEVE:
7359 case ICE_SW_TUN_VXLAN:
7360 case ICE_SW_TUN_NVGRE:
7361 case ICE_SW_TUN_UDP:
7362 case ICE_ALL_TUNNELS:
7363 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7364 case ICE_NON_TUN_QINQ:
7365 case ICE_SW_TUN_PPPOE_QINQ:
7366 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7367 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7368 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7369 *mask = ICE_TUN_FLAG_MASK;
7372 case ICE_SW_TUN_GENEVE_VLAN:
7373 case ICE_SW_TUN_VXLAN_VLAN:
7374 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7384 * ice_add_special_words - Add words that are not protocols, such as metadata
7385 * @rinfo: other information regarding the rule e.g. priority and action info
7386 * @lkup_exts: lookup word structure
7388 static enum ice_status
7389 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7390 struct ice_prot_lkup_ext *lkup_exts)
7394 /* If this is a tunneled packet, then add recipe index to match the
7395 * tunnel bit in the packet metadata flags.
7397 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7398 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7399 u8 word = lkup_exts->n_val_words++;
7401 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7402 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7403 lkup_exts->field_mask[word] = mask;
7405 return ICE_ERR_MAX_LIMIT;
7412 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7413 * @hw: pointer to hardware structure
7414 * @rinfo: other information regarding the rule e.g. priority and action info
7415 * @bm: pointer to memory for returning the bitmap of field vectors
7418 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7421 enum ice_prof_type prof_type;
7423 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7425 switch (rinfo->tun_type) {
7427 case ICE_NON_TUN_QINQ:
7428 prof_type = ICE_PROF_NON_TUN;
7430 case ICE_ALL_TUNNELS:
7431 prof_type = ICE_PROF_TUN_ALL;
7433 case ICE_SW_TUN_VXLAN_GPE:
7434 case ICE_SW_TUN_GENEVE:
7435 case ICE_SW_TUN_GENEVE_VLAN:
7436 case ICE_SW_TUN_VXLAN:
7437 case ICE_SW_TUN_VXLAN_VLAN:
7438 case ICE_SW_TUN_UDP:
7439 case ICE_SW_TUN_GTP:
7440 prof_type = ICE_PROF_TUN_UDP;
7442 case ICE_SW_TUN_NVGRE:
7443 prof_type = ICE_PROF_TUN_GRE;
7445 case ICE_SW_TUN_PPPOE:
7446 case ICE_SW_TUN_PPPOE_QINQ:
7447 prof_type = ICE_PROF_TUN_PPPOE;
7449 case ICE_SW_TUN_PPPOE_PAY:
7450 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7451 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7453 case ICE_SW_TUN_PPPOE_IPV4:
7454 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7455 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7456 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7457 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7459 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7460 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7462 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7463 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7465 case ICE_SW_TUN_PPPOE_IPV6:
7466 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7467 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7468 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7469 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7471 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7472 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7474 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7475 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7477 case ICE_SW_TUN_PROFID_IPV6_ESP:
7478 case ICE_SW_TUN_IPV6_ESP:
7479 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7481 case ICE_SW_TUN_PROFID_IPV6_AH:
7482 case ICE_SW_TUN_IPV6_AH:
7483 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7485 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7486 case ICE_SW_TUN_IPV6_L2TPV3:
7487 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7489 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7490 case ICE_SW_TUN_IPV6_NAT_T:
7491 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7493 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7494 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7496 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7497 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7499 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7500 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7502 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7503 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7505 case ICE_SW_TUN_IPV4_NAT_T:
7506 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7508 case ICE_SW_TUN_IPV4_L2TPV3:
7509 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7511 case ICE_SW_TUN_IPV4_ESP:
7512 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7514 case ICE_SW_TUN_IPV4_AH:
7515 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7517 case ICE_SW_IPV4_TCP:
7518 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7520 case ICE_SW_IPV4_UDP:
7521 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7523 case ICE_SW_IPV6_TCP:
7524 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7526 case ICE_SW_IPV6_UDP:
7527 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7529 case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7530 ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7532 case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7533 ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7535 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7536 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7537 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7538 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7540 case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7541 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7543 case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7544 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7546 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7547 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7548 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7549 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7551 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7552 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7554 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7555 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7557 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7558 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7559 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7560 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7562 case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7563 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7565 case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7566 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7568 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7569 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7570 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7571 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7573 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7574 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7576 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7577 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7579 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7580 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7581 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7582 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7584 case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7585 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7587 case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7588 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7590 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7591 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7592 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7593 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7595 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7596 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7598 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7599 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7601 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7602 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7603 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7604 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7606 case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7607 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7609 case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7610 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7612 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7613 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7614 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7615 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7617 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7618 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7620 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7621 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7623 case ICE_SW_TUN_AND_NON_TUN:
7624 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7626 prof_type = ICE_PROF_ALL;
7630 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7634 * ice_is_prof_rule - determine if rule type is a profile rule
7635 * @type: the rule type
7637 * if the rule type is a profile rule, that means that there no field value
7638 * match required, in this case just a profile hit is required.
7640 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7643 case ICE_SW_TUN_PROFID_IPV6_ESP:
7644 case ICE_SW_TUN_PROFID_IPV6_AH:
7645 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7646 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7647 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7648 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7649 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7650 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7660 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7661 * @hw: pointer to hardware structure
7662 * @lkups: lookup elements or match criteria for the advanced recipe, one
7663 * structure per protocol header
7664 * @lkups_cnt: number of protocols
7665 * @rinfo: other information regarding the rule e.g. priority and action info
7666 * @rid: return the recipe ID of the recipe created
7668 static enum ice_status
7669 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7670 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7672 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7673 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7674 struct ice_prot_lkup_ext *lkup_exts;
7675 struct ice_recp_grp_entry *r_entry;
7676 struct ice_sw_fv_list_entry *fvit;
7677 struct ice_recp_grp_entry *r_tmp;
7678 struct ice_sw_fv_list_entry *tmp;
7679 enum ice_status status = ICE_SUCCESS;
7680 struct ice_sw_recipe *rm;
7683 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7684 return ICE_ERR_PARAM;
7686 lkup_exts = (struct ice_prot_lkup_ext *)
7687 ice_malloc(hw, sizeof(*lkup_exts));
7689 return ICE_ERR_NO_MEMORY;
7691 /* Determine the number of words to be matched and if it exceeds a
7692 * recipe's restrictions
7694 for (i = 0; i < lkups_cnt; i++) {
7697 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7698 status = ICE_ERR_CFG;
7699 goto err_free_lkup_exts;
7702 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7704 status = ICE_ERR_CFG;
7705 goto err_free_lkup_exts;
7709 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7711 status = ICE_ERR_NO_MEMORY;
7712 goto err_free_lkup_exts;
7715 /* Get field vectors that contain fields extracted from all the protocol
7716 * headers being programmed.
7718 INIT_LIST_HEAD(&rm->fv_list);
7719 INIT_LIST_HEAD(&rm->rg_list);
7721 /* Get bitmap of field vectors (profiles) that are compatible with the
7722 * rule request; only these will be searched in the subsequent call to
7725 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7727 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7731 /* Create any special protocol/offset pairs, such as looking at tunnel
7732 * bits by extracting metadata
7734 status = ice_add_special_words(rinfo, lkup_exts);
7736 goto err_free_lkup_exts;
7738 /* Group match words into recipes using preferred recipe grouping
7741 status = ice_create_recipe_group(hw, rm, lkup_exts);
7745 /* set the recipe priority if specified */
7746 rm->priority = (u8)rinfo->priority;
7748 /* Find offsets from the field vector. Pick the first one for all the
7751 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7755 /* An empty FV list means to use all the profiles returned in the
7758 if (LIST_EMPTY(&rm->fv_list)) {
7761 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7762 struct ice_sw_fv_list_entry *fvl;
7764 fvl = (struct ice_sw_fv_list_entry *)
7765 ice_malloc(hw, sizeof(*fvl));
7769 fvl->profile_id = j;
7770 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7774 /* get bitmap of all profiles the recipe will be associated with */
7775 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7776 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7778 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7779 ice_set_bit((u16)fvit->profile_id, profiles);
7782 /* Look for a recipe which matches our requested fv / mask list */
7783 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7784 if (*rid < ICE_MAX_NUM_RECIPES)
7785 /* Success if found a recipe that match the existing criteria */
7788 rm->tun_type = rinfo->tun_type;
7789 /* Recipe we need does not exist, add a recipe */
7790 status = ice_add_sw_recipe(hw, rm, profiles);
7794 /* Associate all the recipes created with all the profiles in the
7795 * common field vector.
7797 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7799 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7802 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7803 (u8 *)r_bitmap, NULL);
7807 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7808 ICE_MAX_NUM_RECIPES);
7809 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7813 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7816 ice_release_change_lock(hw);
7821 /* Update profile to recipe bitmap array */
7822 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7823 ICE_MAX_NUM_RECIPES);
7825 /* Update recipe to profile bitmap array */
7826 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7827 ice_set_bit((u16)fvit->profile_id,
7828 recipe_to_profile[j]);
7831 *rid = rm->root_rid;
7832 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7833 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7835 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7836 ice_recp_grp_entry, l_entry) {
7837 LIST_DEL(&r_entry->l_entry);
7838 ice_free(hw, r_entry);
7841 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7843 LIST_DEL(&fvit->list_entry);
7848 ice_free(hw, rm->root_buf);
7853 ice_free(hw, lkup_exts);
7859 * ice_find_dummy_packet - find dummy packet by tunnel type
7861 * @lkups: lookup elements or match criteria for the advanced recipe, one
7862 * structure per protocol header
7863 * @lkups_cnt: number of protocols
7864 * @tun_type: tunnel type from the match criteria
7865 * @pkt: dummy packet to fill according to filter match criteria
7866 * @pkt_len: packet length of dummy packet
7867 * @offsets: pointer to receive the pointer to the offsets for the packet
7870 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7871 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7873 const struct ice_dummy_pkt_offsets **offsets)
7875 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7876 bool gre = false, mpls = false;
7879 for (i = 0; i < lkups_cnt; i++) {
7880 if (lkups[i].type == ICE_UDP_ILOS)
7882 else if (lkups[i].type == ICE_TCP_IL)
7884 else if (lkups[i].type == ICE_IPV6_OFOS)
7886 else if (lkups[i].type == ICE_VLAN_OFOS)
7888 else if (lkups[i].type == ICE_ETYPE_OL &&
7889 lkups[i].h_u.ethertype.ethtype_id ==
7890 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7891 lkups[i].m_u.ethertype.ethtype_id ==
7892 CPU_TO_BE16(0xFFFF))
7894 else if (lkups[i].type == ICE_IPV4_OFOS &&
7895 lkups[i].h_u.ipv4_hdr.protocol ==
7896 ICE_IPV4_NVGRE_PROTO_ID &&
7897 lkups[i].m_u.ipv4_hdr.protocol ==
7900 else if (lkups[i].type == ICE_PPPOE &&
7901 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7902 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7903 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7906 else if (lkups[i].type == ICE_IPV4_IL &&
7907 lkups[i].h_u.ipv4_hdr.protocol ==
7909 lkups[i].m_u.ipv4_hdr.protocol ==
7912 else if (lkups[i].type == ICE_ETYPE_OL &&
7913 lkups[i].h_u.ethertype.ethtype_id ==
7914 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
7915 lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
7919 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7920 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7921 *pkt = dummy_qinq_ipv6_pkt;
7922 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7923 *offsets = dummy_qinq_ipv6_packet_offsets;
7925 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7926 tun_type == ICE_NON_TUN_QINQ) {
7927 *pkt = dummy_qinq_ipv4_pkt;
7928 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7929 *offsets = dummy_qinq_ipv4_packet_offsets;
7933 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7934 *pkt = dummy_qinq_pppoe_ipv6_packet;
7935 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7936 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7938 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7939 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7940 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7941 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7943 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7944 *pkt = dummy_qinq_pppoe_ipv6_packet;
7945 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7946 *offsets = dummy_qinq_pppoe_packet_offsets;
7948 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7949 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7950 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7951 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7952 *offsets = dummy_qinq_pppoe_packet_offsets;
7956 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7957 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7958 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7959 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7961 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7962 *pkt = dummy_ipv6_gtp_packet;
7963 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7964 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7968 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7969 *pkt = dummy_ipv4_esp_pkt;
7970 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7971 *offsets = dummy_ipv4_esp_packet_offsets;
7975 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7976 *pkt = dummy_ipv6_esp_pkt;
7977 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7978 *offsets = dummy_ipv6_esp_packet_offsets;
7982 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7983 *pkt = dummy_ipv4_ah_pkt;
7984 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7985 *offsets = dummy_ipv4_ah_packet_offsets;
7989 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7990 *pkt = dummy_ipv6_ah_pkt;
7991 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7992 *offsets = dummy_ipv6_ah_packet_offsets;
7996 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7997 *pkt = dummy_ipv4_nat_pkt;
7998 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7999 *offsets = dummy_ipv4_nat_packet_offsets;
8003 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8004 *pkt = dummy_ipv6_nat_pkt;
8005 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8006 *offsets = dummy_ipv6_nat_packet_offsets;
8010 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8011 *pkt = dummy_ipv4_l2tpv3_pkt;
8012 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8013 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8017 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8018 *pkt = dummy_ipv6_l2tpv3_pkt;
8019 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8020 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8024 if (tun_type == ICE_SW_TUN_GTP) {
8025 *pkt = dummy_udp_gtp_packet;
8026 *pkt_len = sizeof(dummy_udp_gtp_packet);
8027 *offsets = dummy_udp_gtp_packet_offsets;
8031 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8032 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8033 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8034 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8035 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8039 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8040 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8041 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8042 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8043 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8047 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8048 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8049 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8050 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8051 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8055 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8056 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8057 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8058 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8059 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8063 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8064 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8065 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8066 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8067 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8071 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8072 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8073 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8074 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8075 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8079 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8080 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8081 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8082 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8083 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8087 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8088 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8089 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8090 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8091 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8095 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8096 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8097 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8098 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8099 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8103 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8104 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8105 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8106 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8107 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8111 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8112 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8113 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8114 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8115 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8119 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8120 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8121 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8122 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8123 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8127 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8128 *pkt = dummy_pppoe_ipv6_packet;
8129 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8130 *offsets = dummy_pppoe_packet_offsets;
8132 } else if (tun_type == ICE_SW_TUN_PPPOE ||
8133 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8134 *pkt = dummy_pppoe_ipv4_packet;
8135 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8136 *offsets = dummy_pppoe_packet_offsets;
8140 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8141 *pkt = dummy_pppoe_ipv4_packet;
8142 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8143 *offsets = dummy_pppoe_packet_ipv4_offsets;
8147 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8148 *pkt = dummy_pppoe_ipv4_tcp_packet;
8149 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8150 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8154 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8155 *pkt = dummy_pppoe_ipv4_udp_packet;
8156 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8157 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8161 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8162 *pkt = dummy_pppoe_ipv6_packet;
8163 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8164 *offsets = dummy_pppoe_packet_ipv6_offsets;
8168 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8169 *pkt = dummy_pppoe_ipv6_tcp_packet;
8170 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8171 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8175 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8176 *pkt = dummy_pppoe_ipv6_udp_packet;
8177 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8178 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8182 if (tun_type == ICE_SW_IPV4_TCP) {
8183 *pkt = dummy_tcp_packet;
8184 *pkt_len = sizeof(dummy_tcp_packet);
8185 *offsets = dummy_tcp_packet_offsets;
8189 if (tun_type == ICE_SW_IPV4_UDP) {
8190 *pkt = dummy_udp_packet;
8191 *pkt_len = sizeof(dummy_udp_packet);
8192 *offsets = dummy_udp_packet_offsets;
8196 if (tun_type == ICE_SW_IPV6_TCP) {
8197 *pkt = dummy_tcp_ipv6_packet;
8198 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8199 *offsets = dummy_tcp_ipv6_packet_offsets;
8203 if (tun_type == ICE_SW_IPV6_UDP) {
8204 *pkt = dummy_udp_ipv6_packet;
8205 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8206 *offsets = dummy_udp_ipv6_packet_offsets;
8210 if (tun_type == ICE_ALL_TUNNELS) {
8211 *pkt = dummy_gre_udp_packet;
8212 *pkt_len = sizeof(dummy_gre_udp_packet);
8213 *offsets = dummy_gre_udp_packet_offsets;
8217 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8219 *pkt = dummy_gre_tcp_packet;
8220 *pkt_len = sizeof(dummy_gre_tcp_packet);
8221 *offsets = dummy_gre_tcp_packet_offsets;
8225 *pkt = dummy_gre_udp_packet;
8226 *pkt_len = sizeof(dummy_gre_udp_packet);
8227 *offsets = dummy_gre_udp_packet_offsets;
8231 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8232 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8233 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8234 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8236 *pkt = dummy_udp_tun_tcp_packet;
8237 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8238 *offsets = dummy_udp_tun_tcp_packet_offsets;
8242 *pkt = dummy_udp_tun_udp_packet;
8243 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8244 *offsets = dummy_udp_tun_udp_packet_offsets;
8250 *pkt = dummy_vlan_udp_packet;
8251 *pkt_len = sizeof(dummy_vlan_udp_packet);
8252 *offsets = dummy_vlan_udp_packet_offsets;
8255 *pkt = dummy_udp_packet;
8256 *pkt_len = sizeof(dummy_udp_packet);
8257 *offsets = dummy_udp_packet_offsets;
8259 } else if (udp && ipv6) {
8261 *pkt = dummy_vlan_udp_ipv6_packet;
8262 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8263 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8266 *pkt = dummy_udp_ipv6_packet;
8267 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8268 *offsets = dummy_udp_ipv6_packet_offsets;
8270 } else if ((tcp && ipv6) || ipv6) {
8272 *pkt = dummy_vlan_tcp_ipv6_packet;
8273 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8274 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8277 *pkt = dummy_tcp_ipv6_packet;
8278 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8279 *offsets = dummy_tcp_ipv6_packet_offsets;
8284 *pkt = dummy_vlan_tcp_packet;
8285 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8286 *offsets = dummy_vlan_tcp_packet_offsets;
8288 *pkt = dummy_mpls_packet;
8289 *pkt_len = sizeof(dummy_mpls_packet);
8290 *offsets = dummy_mpls_packet_offsets;
8292 *pkt = dummy_tcp_packet;
8293 *pkt_len = sizeof(dummy_tcp_packet);
8294 *offsets = dummy_tcp_packet_offsets;
8299 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8301 * @lkups: lookup elements or match criteria for the advanced recipe, one
8302 * structure per protocol header
8303 * @lkups_cnt: number of protocols
8304 * @s_rule: stores rule information from the match criteria
8305 * @dummy_pkt: dummy packet to fill according to filter match criteria
8306 * @pkt_len: packet length of dummy packet
8307 * @offsets: offset info for the dummy packet
8309 static enum ice_status
8310 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8311 struct ice_aqc_sw_rules_elem *s_rule,
8312 const u8 *dummy_pkt, u16 pkt_len,
8313 const struct ice_dummy_pkt_offsets *offsets)
8318 /* Start with a packet with a pre-defined/dummy content. Then, fill
8319 * in the header values to be looked up or matched.
8321 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8323 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8325 for (i = 0; i < lkups_cnt; i++) {
8326 enum ice_protocol_type type;
8327 u16 offset = 0, len = 0, j;
8330 /* find the start of this layer; it should be found since this
8331 * was already checked when search for the dummy packet
8333 type = lkups[i].type;
8334 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8335 if (type == offsets[j].type) {
8336 offset = offsets[j].offset;
8341 /* this should never happen in a correct calling sequence */
8343 return ICE_ERR_PARAM;
8345 switch (lkups[i].type) {
8348 len = sizeof(struct ice_ether_hdr);
8351 len = sizeof(struct ice_ethtype_hdr);
8356 len = sizeof(struct ice_vlan_hdr);
8360 len = sizeof(struct ice_ipv4_hdr);
8364 len = sizeof(struct ice_ipv6_hdr);
8369 len = sizeof(struct ice_l4_hdr);
8372 len = sizeof(struct ice_sctp_hdr);
8375 len = sizeof(struct ice_nvgre);
8380 len = sizeof(struct ice_udp_tnl_hdr);
8384 case ICE_GTP_NO_PAY:
8385 len = sizeof(struct ice_udp_gtp_hdr);
8388 len = sizeof(struct ice_pppoe_hdr);
8391 len = sizeof(struct ice_esp_hdr);
8394 len = sizeof(struct ice_nat_t_hdr);
8397 len = sizeof(struct ice_ah_hdr);
8400 len = sizeof(struct ice_l2tpv3_sess_hdr);
8403 return ICE_ERR_PARAM;
8406 /* the length should be a word multiple */
8407 if (len % ICE_BYTES_PER_WORD)
8410 /* We have the offset to the header start, the length, the
8411 * caller's header values and mask. Use this information to
8412 * copy the data into the dummy packet appropriately based on
8413 * the mask. Note that we need to only write the bits as
8414 * indicated by the mask to make sure we don't improperly write
8415 * over any significant packet data.
8417 for (j = 0; j < len / sizeof(u16); j++)
8418 if (((u16 *)&lkups[i].m_u)[j])
8419 ((u16 *)(pkt + offset))[j] =
8420 (((u16 *)(pkt + offset))[j] &
8421 ~((u16 *)&lkups[i].m_u)[j]) |
8422 (((u16 *)&lkups[i].h_u)[j] &
8423 ((u16 *)&lkups[i].m_u)[j]);
8426 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8432 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8433 * @hw: pointer to the hardware structure
8434 * @tun_type: tunnel type
8435 * @pkt: dummy packet to fill in
8436 * @offsets: offset info for the dummy packet
8438 static enum ice_status
8439 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8440 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8445 case ICE_SW_TUN_AND_NON_TUN:
8446 case ICE_SW_TUN_VXLAN_GPE:
8447 case ICE_SW_TUN_VXLAN:
8448 case ICE_SW_TUN_VXLAN_VLAN:
8449 case ICE_SW_TUN_UDP:
8450 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8454 case ICE_SW_TUN_GENEVE:
8455 case ICE_SW_TUN_GENEVE_VLAN:
8456 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8461 /* Nothing needs to be done for this tunnel type */
8465 /* Find the outer UDP protocol header and insert the port number */
8466 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8467 if (offsets[i].type == ICE_UDP_OF) {
8468 struct ice_l4_hdr *hdr;
8471 offset = offsets[i].offset;
8472 hdr = (struct ice_l4_hdr *)&pkt[offset];
8473 hdr->dst_port = CPU_TO_BE16(open_port);
8483 * ice_find_adv_rule_entry - Search a rule entry
8484 * @hw: pointer to the hardware structure
8485 * @lkups: lookup elements or match criteria for the advanced recipe, one
8486 * structure per protocol header
8487 * @lkups_cnt: number of protocols
8488 * @recp_id: recipe ID for which we are finding the rule
8489 * @rinfo: other information regarding the rule e.g. priority and action info
8491 * Helper function to search for a given advance rule entry
8492 * Returns pointer to entry storing the rule if found
8494 static struct ice_adv_fltr_mgmt_list_entry *
8495 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8496 u16 lkups_cnt, u16 recp_id,
8497 struct ice_adv_rule_info *rinfo)
8499 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8500 struct ice_switch_info *sw = hw->switch_info;
8503 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8504 ice_adv_fltr_mgmt_list_entry, list_entry) {
8505 bool lkups_matched = true;
8507 if (lkups_cnt != list_itr->lkups_cnt)
8509 for (i = 0; i < list_itr->lkups_cnt; i++)
8510 if (memcmp(&list_itr->lkups[i], &lkups[i],
8512 lkups_matched = false;
8515 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8516 rinfo->tun_type == list_itr->rule_info.tun_type &&
8524 * ice_adv_add_update_vsi_list
8525 * @hw: pointer to the hardware structure
8526 * @m_entry: pointer to current adv filter management list entry
8527 * @cur_fltr: filter information from the book keeping entry
8528 * @new_fltr: filter information with the new VSI to be added
8530 * Call AQ command to add or update previously created VSI list with new VSI.
8532 * Helper function to do book keeping associated with adding filter information
8533 * The algorithm to do the booking keeping is described below :
8534 * When a VSI needs to subscribe to a given advanced filter
8535 * if only one VSI has been added till now
8536 * Allocate a new VSI list and add two VSIs
8537 * to this list using switch rule command
8538 * Update the previously created switch rule with the
8539 * newly created VSI list ID
8540 * if a VSI list was previously created
8541 * Add the new VSI to the previously created VSI list set
8542 * using the update switch rule command
8544 static enum ice_status
8545 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8546 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8547 struct ice_adv_rule_info *cur_fltr,
8548 struct ice_adv_rule_info *new_fltr)
8550 enum ice_status status;
8551 u16 vsi_list_id = 0;
8553 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8554 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8555 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8556 return ICE_ERR_NOT_IMPL;
8558 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8559 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8560 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8561 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8562 return ICE_ERR_NOT_IMPL;
8564 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8565 /* Only one entry existed in the mapping and it was not already
8566 * a part of a VSI list. So, create a VSI list with the old and
8569 struct ice_fltr_info tmp_fltr;
8570 u16 vsi_handle_arr[2];
8572 /* A rule already exists with the new VSI being added */
8573 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8574 new_fltr->sw_act.fwd_id.hw_vsi_id)
8575 return ICE_ERR_ALREADY_EXISTS;
8577 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8578 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8579 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8585 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8586 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8587 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8588 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8589 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8590 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8592 /* Update the previous switch rule of "forward to VSI" to
8595 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8599 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8600 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8601 m_entry->vsi_list_info =
8602 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8605 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8607 if (!m_entry->vsi_list_info)
8610 /* A rule already exists with the new VSI being added */
8611 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8614 /* Update the previously created VSI list set with
8615 * the new VSI ID passed in
8617 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8619 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8621 ice_aqc_opc_update_sw_rules,
8623 /* update VSI list mapping info with new VSI ID */
8625 ice_set_bit(vsi_handle,
8626 m_entry->vsi_list_info->vsi_map);
8629 m_entry->vsi_count++;
8634 * ice_add_adv_rule - helper function to create an advanced switch rule
8635 * @hw: pointer to the hardware structure
8636 * @lkups: information on the words that needs to be looked up. All words
8637 * together makes one recipe
8638 * @lkups_cnt: num of entries in the lkups array
8639 * @rinfo: other information related to the rule that needs to be programmed
8640 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8641 * ignored is case of error.
8643 * This function can program only 1 rule at a time. The lkups is used to
8644 * describe the all the words that forms the "lookup" portion of the recipe.
8645 * These words can span multiple protocols. Callers to this function need to
8646 * pass in a list of protocol headers with lookup information along and mask
8647 * that determines which words are valid from the given protocol header.
8648 * rinfo describes other information related to this rule such as forwarding
8649 * IDs, priority of this rule, etc.
8652 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8653 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8654 struct ice_rule_query_data *added_entry)
8656 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8657 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8658 const struct ice_dummy_pkt_offsets *pkt_offsets;
8659 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8660 struct LIST_HEAD_TYPE *rule_head;
8661 struct ice_switch_info *sw;
8662 enum ice_status status;
8663 const u8 *pkt = NULL;
8669 /* Initialize profile to result index bitmap */
8670 if (!hw->switch_info->prof_res_bm_init) {
8671 hw->switch_info->prof_res_bm_init = 1;
8672 ice_init_prof_result_bm(hw);
8675 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8676 if (!prof_rule && !lkups_cnt)
8677 return ICE_ERR_PARAM;
8679 /* get # of words we need to match */
8681 for (i = 0; i < lkups_cnt; i++) {
8684 ptr = (u16 *)&lkups[i].m_u;
8685 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8691 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8692 return ICE_ERR_PARAM;
8694 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8695 return ICE_ERR_PARAM;
8698 /* make sure that we can locate a dummy packet */
8699 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8702 status = ICE_ERR_PARAM;
8703 goto err_ice_add_adv_rule;
8706 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8707 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8708 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8709 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8712 vsi_handle = rinfo->sw_act.vsi_handle;
8713 if (!ice_is_vsi_valid(hw, vsi_handle))
8714 return ICE_ERR_PARAM;
8716 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8717 rinfo->sw_act.fwd_id.hw_vsi_id =
8718 ice_get_hw_vsi_num(hw, vsi_handle);
8719 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8720 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8722 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8725 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8727 /* we have to add VSI to VSI_LIST and increment vsi_count.
8728 * Also Update VSI list so that we can change forwarding rule
8729 * if the rule already exists, we will check if it exists with
8730 * same vsi_id, if not then add it to the VSI list if it already
8731 * exists if not then create a VSI list and add the existing VSI
8732 * ID and the new VSI ID to the list
8733 * We will add that VSI to the list
8735 status = ice_adv_add_update_vsi_list(hw, m_entry,
8736 &m_entry->rule_info,
8739 added_entry->rid = rid;
8740 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8741 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8745 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8746 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8748 return ICE_ERR_NO_MEMORY;
8749 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8750 switch (rinfo->sw_act.fltr_act) {
8751 case ICE_FWD_TO_VSI:
8752 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8753 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8754 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8757 act |= ICE_SINGLE_ACT_TO_Q;
8758 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8759 ICE_SINGLE_ACT_Q_INDEX_M;
8761 case ICE_FWD_TO_QGRP:
8762 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8763 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8764 act |= ICE_SINGLE_ACT_TO_Q;
8765 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8766 ICE_SINGLE_ACT_Q_INDEX_M;
8767 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8768 ICE_SINGLE_ACT_Q_REGION_M;
8770 case ICE_DROP_PACKET:
8771 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8772 ICE_SINGLE_ACT_VALID_BIT;
8775 status = ICE_ERR_CFG;
8776 goto err_ice_add_adv_rule;
8779 /* set the rule LOOKUP type based on caller specified 'RX'
8780 * instead of hardcoding it to be either LOOKUP_TX/RX
8782 * for 'RX' set the source to be the port number
8783 * for 'TX' set the source to be the source HW VSI number (determined
8787 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8788 s_rule->pdata.lkup_tx_rx.src =
8789 CPU_TO_LE16(hw->port_info->lport);
8791 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8792 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8795 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8796 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8798 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8799 pkt_len, pkt_offsets);
8801 goto err_ice_add_adv_rule;
8803 if (rinfo->tun_type != ICE_NON_TUN &&
8804 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8805 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8806 s_rule->pdata.lkup_tx_rx.hdr,
8809 goto err_ice_add_adv_rule;
8812 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8813 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8816 goto err_ice_add_adv_rule;
8817 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8818 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8820 status = ICE_ERR_NO_MEMORY;
8821 goto err_ice_add_adv_rule;
8824 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8825 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8826 ICE_NONDMA_TO_NONDMA);
8827 if (!adv_fltr->lkups && !prof_rule) {
8828 status = ICE_ERR_NO_MEMORY;
8829 goto err_ice_add_adv_rule;
8832 adv_fltr->lkups_cnt = lkups_cnt;
8833 adv_fltr->rule_info = *rinfo;
8834 adv_fltr->rule_info.fltr_rule_id =
8835 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8836 sw = hw->switch_info;
8837 sw->recp_list[rid].adv_rule = true;
8838 rule_head = &sw->recp_list[rid].filt_rules;
8840 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8841 adv_fltr->vsi_count = 1;
8843 /* Add rule entry to book keeping list */
8844 LIST_ADD(&adv_fltr->list_entry, rule_head);
8846 added_entry->rid = rid;
8847 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8848 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8850 err_ice_add_adv_rule:
8851 if (status && adv_fltr) {
8852 ice_free(hw, adv_fltr->lkups);
8853 ice_free(hw, adv_fltr);
8856 ice_free(hw, s_rule);
8862 * ice_adv_rem_update_vsi_list
8863 * @hw: pointer to the hardware structure
8864 * @vsi_handle: VSI handle of the VSI to remove
8865 * @fm_list: filter management entry for which the VSI list management needs to
8868 static enum ice_status
8869 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8870 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8872 struct ice_vsi_list_map_info *vsi_list_info;
8873 enum ice_sw_lkup_type lkup_type;
8874 enum ice_status status;
8877 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8878 fm_list->vsi_count == 0)
8879 return ICE_ERR_PARAM;
8881 /* A rule with the VSI being removed does not exist */
8882 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8883 return ICE_ERR_DOES_NOT_EXIST;
8885 lkup_type = ICE_SW_LKUP_LAST;
8886 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8887 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8888 ice_aqc_opc_update_sw_rules,
8893 fm_list->vsi_count--;
8894 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8895 vsi_list_info = fm_list->vsi_list_info;
8896 if (fm_list->vsi_count == 1) {
8897 struct ice_fltr_info tmp_fltr;
8900 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8902 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8903 return ICE_ERR_OUT_OF_RANGE;
8905 /* Make sure VSI list is empty before removing it below */
8906 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8908 ice_aqc_opc_update_sw_rules,
8913 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8914 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8915 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8916 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8917 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8918 tmp_fltr.fwd_id.hw_vsi_id =
8919 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8920 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8921 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8922 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8924 /* Update the previous switch rule of "MAC forward to VSI" to
8925 * "MAC fwd to VSI list"
8927 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8929 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8930 tmp_fltr.fwd_id.hw_vsi_id, status);
8933 fm_list->vsi_list_info->ref_cnt--;
8935 /* Remove the VSI list since it is no longer used */
8936 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8938 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8939 vsi_list_id, status);
8943 LIST_DEL(&vsi_list_info->list_entry);
8944 ice_free(hw, vsi_list_info);
8945 fm_list->vsi_list_info = NULL;
8952 * ice_rem_adv_rule - removes existing advanced switch rule
8953 * @hw: pointer to the hardware structure
8954 * @lkups: information on the words that needs to be looked up. All words
8955 * together makes one recipe
8956 * @lkups_cnt: num of entries in the lkups array
8957 * @rinfo: Its the pointer to the rule information for the rule
8959 * This function can be used to remove 1 rule at a time. The lkups is
8960 * used to describe all the words that forms the "lookup" portion of the
8961 * rule. These words can span multiple protocols. Callers to this function
8962 * need to pass in a list of protocol headers with lookup information along
8963 * and mask that determines which words are valid from the given protocol
8964 * header. rinfo describes other information related to this rule such as
8965 * forwarding IDs, priority of this rule, etc.
8968 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8969 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8971 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8972 struct ice_prot_lkup_ext lkup_exts;
8973 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8974 enum ice_status status = ICE_SUCCESS;
8975 bool remove_rule = false;
8976 u16 i, rid, vsi_handle;
8978 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8979 for (i = 0; i < lkups_cnt; i++) {
8982 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8985 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8990 /* Create any special protocol/offset pairs, such as looking at tunnel
8991 * bits by extracting metadata
8993 status = ice_add_special_words(rinfo, &lkup_exts);
8997 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
8998 /* If did not find a recipe that match the existing criteria */
8999 if (rid == ICE_MAX_NUM_RECIPES)
9000 return ICE_ERR_PARAM;
9002 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9003 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9004 /* the rule is already removed */
9007 ice_acquire_lock(rule_lock);
9008 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9010 } else if (list_elem->vsi_count > 1) {
9011 remove_rule = false;
9012 vsi_handle = rinfo->sw_act.vsi_handle;
9013 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9015 vsi_handle = rinfo->sw_act.vsi_handle;
9016 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9018 ice_release_lock(rule_lock);
9021 if (list_elem->vsi_count == 0)
9024 ice_release_lock(rule_lock);
9026 struct ice_aqc_sw_rules_elem *s_rule;
9029 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9030 s_rule = (struct ice_aqc_sw_rules_elem *)
9031 ice_malloc(hw, rule_buf_sz);
9033 return ICE_ERR_NO_MEMORY;
9034 s_rule->pdata.lkup_tx_rx.act = 0;
9035 s_rule->pdata.lkup_tx_rx.index =
9036 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9037 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9038 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9040 ice_aqc_opc_remove_sw_rules, NULL);
9041 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9042 struct ice_switch_info *sw = hw->switch_info;
9044 ice_acquire_lock(rule_lock);
9045 LIST_DEL(&list_elem->list_entry);
9046 ice_free(hw, list_elem->lkups);
9047 ice_free(hw, list_elem);
9048 ice_release_lock(rule_lock);
9049 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9050 sw->recp_list[rid].adv_rule = false;
9052 ice_free(hw, s_rule);
9058 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9059 * @hw: pointer to the hardware structure
9060 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9062 * This function is used to remove 1 rule at a time. The removal is based on
9063 * the remove_entry parameter. This function will remove rule for a given
9064 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9067 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9068 struct ice_rule_query_data *remove_entry)
9070 struct ice_adv_fltr_mgmt_list_entry *list_itr;
9071 struct LIST_HEAD_TYPE *list_head;
9072 struct ice_adv_rule_info rinfo;
9073 struct ice_switch_info *sw;
9075 sw = hw->switch_info;
9076 if (!sw->recp_list[remove_entry->rid].recp_created)
9077 return ICE_ERR_PARAM;
9078 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9079 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9081 if (list_itr->rule_info.fltr_rule_id ==
9082 remove_entry->rule_id) {
9083 rinfo = list_itr->rule_info;
9084 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9085 return ice_rem_adv_rule(hw, list_itr->lkups,
9086 list_itr->lkups_cnt, &rinfo);
9089 /* either list is empty or unable to find rule */
9090 return ICE_ERR_DOES_NOT_EXIST;
9094 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9096 * @hw: pointer to the hardware structure
9097 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9099 * This function is used to remove all the rules for a given VSI and as soon
9100 * as removing a rule fails, it will return immediately with the error code,
9101 * else it will return ICE_SUCCESS
9103 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9105 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9106 struct ice_vsi_list_map_info *map_info;
9107 struct LIST_HEAD_TYPE *list_head;
9108 struct ice_adv_rule_info rinfo;
9109 struct ice_switch_info *sw;
9110 enum ice_status status;
9113 sw = hw->switch_info;
9114 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9115 if (!sw->recp_list[rid].recp_created)
9117 if (!sw->recp_list[rid].adv_rule)
9120 list_head = &sw->recp_list[rid].filt_rules;
9121 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9122 ice_adv_fltr_mgmt_list_entry,
9124 rinfo = list_itr->rule_info;
9126 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9127 map_info = list_itr->vsi_list_info;
9131 if (!ice_is_bit_set(map_info->vsi_map,
9134 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9138 rinfo.sw_act.vsi_handle = vsi_handle;
9139 status = ice_rem_adv_rule(hw, list_itr->lkups,
9140 list_itr->lkups_cnt, &rinfo);
9150 * ice_replay_fltr - Replay all the filters stored by a specific list head
9151 * @hw: pointer to the hardware structure
9152 * @list_head: list for which filters needs to be replayed
9153 * @recp_id: Recipe ID for which rules need to be replayed
9155 static enum ice_status
9156 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9158 struct ice_fltr_mgmt_list_entry *itr;
9159 enum ice_status status = ICE_SUCCESS;
9160 struct ice_sw_recipe *recp_list;
9161 u8 lport = hw->port_info->lport;
9162 struct LIST_HEAD_TYPE l_head;
9164 if (LIST_EMPTY(list_head))
9167 recp_list = &hw->switch_info->recp_list[recp_id];
9168 /* Move entries from the given list_head to a temporary l_head so that
9169 * they can be replayed. Otherwise when trying to re-add the same
9170 * filter, the function will return already exists
9172 LIST_REPLACE_INIT(list_head, &l_head);
9174 /* Mark the given list_head empty by reinitializing it so filters
9175 * could be added again by *handler
9177 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9179 struct ice_fltr_list_entry f_entry;
9182 f_entry.fltr_info = itr->fltr_info;
9183 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9184 status = ice_add_rule_internal(hw, recp_list, lport,
9186 if (status != ICE_SUCCESS)
9191 /* Add a filter per VSI separately */
9192 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9194 if (!ice_is_vsi_valid(hw, vsi_handle))
9197 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9198 f_entry.fltr_info.vsi_handle = vsi_handle;
9199 f_entry.fltr_info.fwd_id.hw_vsi_id =
9200 ice_get_hw_vsi_num(hw, vsi_handle);
9201 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9202 if (recp_id == ICE_SW_LKUP_VLAN)
9203 status = ice_add_vlan_internal(hw, recp_list,
9206 status = ice_add_rule_internal(hw, recp_list,
9209 if (status != ICE_SUCCESS)
9214 /* Clear the filter management list */
9215 ice_rem_sw_rule_info(hw, &l_head);
9220 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9221 * @hw: pointer to the hardware structure
9223 * NOTE: This function does not clean up partially added filters on error.
9224 * It is up to caller of the function to issue a reset or fail early.
9226 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9228 struct ice_switch_info *sw = hw->switch_info;
9229 enum ice_status status = ICE_SUCCESS;
9232 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9233 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9235 status = ice_replay_fltr(hw, i, head);
9236 if (status != ICE_SUCCESS)
9243 * ice_replay_vsi_fltr - Replay filters for requested VSI
9244 * @hw: pointer to the hardware structure
9245 * @pi: pointer to port information structure
9246 * @sw: pointer to switch info struct for which function replays filters
9247 * @vsi_handle: driver VSI handle
9248 * @recp_id: Recipe ID for which rules need to be replayed
9249 * @list_head: list for which filters need to be replayed
9251 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9252 * It is required to pass valid VSI handle.
9254 static enum ice_status
9255 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9256 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9257 struct LIST_HEAD_TYPE *list_head)
9259 struct ice_fltr_mgmt_list_entry *itr;
9260 enum ice_status status = ICE_SUCCESS;
9261 struct ice_sw_recipe *recp_list;
9264 if (LIST_EMPTY(list_head))
9266 recp_list = &sw->recp_list[recp_id];
9267 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9269 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9271 struct ice_fltr_list_entry f_entry;
9273 f_entry.fltr_info = itr->fltr_info;
9274 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9275 itr->fltr_info.vsi_handle == vsi_handle) {
9276 /* update the src in case it is VSI num */
9277 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9278 f_entry.fltr_info.src = hw_vsi_id;
9279 status = ice_add_rule_internal(hw, recp_list,
9282 if (status != ICE_SUCCESS)
9286 if (!itr->vsi_list_info ||
9287 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9289 /* Clearing it so that the logic can add it back */
9290 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9291 f_entry.fltr_info.vsi_handle = vsi_handle;
9292 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9293 /* update the src in case it is VSI num */
9294 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9295 f_entry.fltr_info.src = hw_vsi_id;
9296 if (recp_id == ICE_SW_LKUP_VLAN)
9297 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9299 status = ice_add_rule_internal(hw, recp_list,
9302 if (status != ICE_SUCCESS)
9310 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9311 * @hw: pointer to the hardware structure
9312 * @vsi_handle: driver VSI handle
9313 * @list_head: list for which filters need to be replayed
9315 * Replay the advanced rule for the given VSI.
9317 static enum ice_status
9318 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9319 struct LIST_HEAD_TYPE *list_head)
9321 struct ice_rule_query_data added_entry = { 0 };
9322 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9323 enum ice_status status = ICE_SUCCESS;
9325 if (LIST_EMPTY(list_head))
9327 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9329 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9330 u16 lk_cnt = adv_fltr->lkups_cnt;
9332 if (vsi_handle != rinfo->sw_act.vsi_handle)
9334 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9343 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9344 * @hw: pointer to the hardware structure
9345 * @pi: pointer to port information structure
9346 * @vsi_handle: driver VSI handle
9348 * Replays filters for requested VSI via vsi_handle.
9351 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9354 struct ice_switch_info *sw = hw->switch_info;
9355 enum ice_status status;
9358 /* Update the recipes that were created */
9359 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9360 struct LIST_HEAD_TYPE *head;
9362 head = &sw->recp_list[i].filt_replay_rules;
9363 if (!sw->recp_list[i].adv_rule)
9364 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9367 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9368 if (status != ICE_SUCCESS)
9376 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9377 * @hw: pointer to the HW struct
9378 * @sw: pointer to switch info struct for which function removes filters
9380 * Deletes the filter replay rules for given switch
9382 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9389 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9390 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9391 struct LIST_HEAD_TYPE *l_head;
9393 l_head = &sw->recp_list[i].filt_replay_rules;
9394 if (!sw->recp_list[i].adv_rule)
9395 ice_rem_sw_rule_info(hw, l_head);
9397 ice_rem_adv_rule_info(hw, l_head);
9403 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9404 * @hw: pointer to the HW struct
9406 * Deletes the filter replay rules.
9408 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9410 ice_rm_sw_replay_rule_info(hw, hw->switch_info);