1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV6_ETHER_ID 0x86DD
14 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
15 #define ICE_PPP_IPV6_PROTO_ID 0x0057
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_GTPU_PROFILE 24
18 #define ICE_ETH_P_8021Q 0x8100
19 #define ICE_MPLS_ETHER_ID 0x8847
21 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
22 * struct to configure any switch filter rules.
23 * {DA (6 bytes), SA(6 bytes),
24 * Ether type (2 bytes for header without VLAN tag) OR
25 * VLAN tag (4 bytes for header with VLAN tag) }
27 * Word on Hardcoded values
28 * byte 0 = 0x2: to identify it as locally administered DA MAC
29 * byte 6 = 0x2: to identify it as locally administered SA MAC
30 * byte 12 = 0x81 & byte 13 = 0x00:
31 * In case of VLAN filter first two bytes defines ether type (0x8100)
32 * and remaining two bytes are placeholder for programming a given VLAN ID
33 * In case of Ether type filter it is treated as header without VLAN tag
34 * and byte 12 and 13 is used to program a given Ether type instead
36 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
40 struct ice_dummy_pkt_offsets {
41 enum ice_protocol_type type;
42 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
45 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
48 { ICE_IPV4_OFOS, 14 },
53 { ICE_PROTOCOL_LAST, 0 },
56 static const u8 dummy_gre_tcp_packet[] = {
57 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
58 0x00, 0x00, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00,
61 0x08, 0x00, /* ICE_ETYPE_OL 12 */
63 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
64 0x00, 0x00, 0x00, 0x00,
65 0x00, 0x2F, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00,
67 0x00, 0x00, 0x00, 0x00,
69 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
70 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
77 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
78 0x00, 0x00, 0x00, 0x00,
79 0x00, 0x06, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
86 0x50, 0x02, 0x20, 0x00,
87 0x00, 0x00, 0x00, 0x00
90 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
93 { ICE_IPV4_OFOS, 14 },
98 { ICE_PROTOCOL_LAST, 0 },
101 static const u8 dummy_gre_udp_packet[] = {
102 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
103 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00,
106 0x08, 0x00, /* ICE_ETYPE_OL 12 */
108 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x2F, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00,
114 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
115 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
122 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x11, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
129 0x00, 0x08, 0x00, 0x00,
132 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
134 { ICE_ETYPE_OL, 12 },
135 { ICE_IPV4_OFOS, 14 },
139 { ICE_VXLAN_GPE, 42 },
143 { ICE_PROTOCOL_LAST, 0 },
146 static const u8 dummy_udp_tun_tcp_packet[] = {
147 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
148 0x00, 0x00, 0x00, 0x00,
149 0x00, 0x00, 0x00, 0x00,
151 0x08, 0x00, /* ICE_ETYPE_OL 12 */
153 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
154 0x00, 0x01, 0x00, 0x00,
155 0x40, 0x11, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
157 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
160 0x00, 0x46, 0x00, 0x00,
162 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
163 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
166 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00,
170 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
171 0x00, 0x01, 0x00, 0x00,
172 0x40, 0x06, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179 0x50, 0x02, 0x20, 0x00,
180 0x00, 0x00, 0x00, 0x00
183 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
185 { ICE_ETYPE_OL, 12 },
186 { ICE_IPV4_OFOS, 14 },
190 { ICE_VXLAN_GPE, 42 },
193 { ICE_UDP_ILOS, 84 },
194 { ICE_PROTOCOL_LAST, 0 },
197 static const u8 dummy_udp_tun_udp_packet[] = {
198 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
199 0x00, 0x00, 0x00, 0x00,
200 0x00, 0x00, 0x00, 0x00,
202 0x08, 0x00, /* ICE_ETYPE_OL 12 */
204 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
211 0x00, 0x3a, 0x00, 0x00,
213 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
214 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
217 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00,
221 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
222 0x00, 0x01, 0x00, 0x00,
223 0x00, 0x11, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
228 0x00, 0x08, 0x00, 0x00,
231 /* offset info for MAC + IPv4 + UDP dummy packet */
232 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
234 { ICE_ETYPE_OL, 12 },
235 { ICE_IPV4_OFOS, 14 },
236 { ICE_UDP_ILOS, 34 },
237 { ICE_PROTOCOL_LAST, 0 },
240 /* Dummy packet for MAC + IPv4 + UDP */
241 static const u8 dummy_udp_packet[] = {
242 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
243 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00,
246 0x08, 0x00, /* ICE_ETYPE_OL 12 */
248 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
249 0x00, 0x01, 0x00, 0x00,
250 0x00, 0x11, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00,
254 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
255 0x00, 0x08, 0x00, 0x00,
257 0x00, 0x00, /* 2 bytes for 4 byte alignment */
260 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
261 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
263 { ICE_VLAN_OFOS, 12 },
264 { ICE_ETYPE_OL, 16 },
265 { ICE_IPV4_OFOS, 18 },
266 { ICE_UDP_ILOS, 38 },
267 { ICE_PROTOCOL_LAST, 0 },
270 /* C-tag (801.1Q), IPv4:UDP dummy packet */
271 static const u8 dummy_vlan_udp_packet[] = {
272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
278 0x08, 0x00, /* ICE_ETYPE_OL 16 */
280 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
281 0x00, 0x01, 0x00, 0x00,
282 0x00, 0x11, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
287 0x00, 0x08, 0x00, 0x00,
289 0x00, 0x00, /* 2 bytes for 4 byte alignment */
292 /* offset info for MAC + IPv4 + TCP dummy packet */
293 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
295 { ICE_ETYPE_OL, 12 },
296 { ICE_IPV4_OFOS, 14 },
298 { ICE_PROTOCOL_LAST, 0 },
301 /* Dummy packet for MAC + IPv4 + TCP */
302 static const u8 dummy_tcp_packet[] = {
303 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00,
307 0x08, 0x00, /* ICE_ETYPE_OL 12 */
309 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
310 0x00, 0x01, 0x00, 0x00,
311 0x00, 0x06, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
316 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00,
318 0x50, 0x00, 0x00, 0x00,
319 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, /* 2 bytes for 4 byte alignment */
324 /* offset info for MAC + MPLS dummy packet */
325 static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
327 { ICE_ETYPE_OL, 12 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* Dummy packet for MAC + MPLS */
332 static const u8 dummy_mpls_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x88, 0x47, /* ICE_ETYPE_OL 12 */
338 0x00, 0x00, 0x01, 0x00,
340 0x00, 0x00, /* 2 bytes for 4 byte alignment */
343 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
344 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
346 { ICE_VLAN_OFOS, 12 },
347 { ICE_ETYPE_OL, 16 },
348 { ICE_IPV4_OFOS, 18 },
350 { ICE_PROTOCOL_LAST, 0 },
353 /* C-tag (801.1Q), IPv4:TCP dummy packet */
354 static const u8 dummy_vlan_tcp_packet[] = {
355 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
356 0x00, 0x00, 0x00, 0x00,
357 0x00, 0x00, 0x00, 0x00,
359 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
361 0x08, 0x00, /* ICE_ETYPE_OL 16 */
363 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
364 0x00, 0x01, 0x00, 0x00,
365 0x00, 0x06, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
372 0x50, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, /* 2 bytes for 4 byte alignment */
378 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
380 { ICE_ETYPE_OL, 12 },
381 { ICE_IPV6_OFOS, 14 },
383 { ICE_PROTOCOL_LAST, 0 },
386 static const u8 dummy_tcp_ipv6_packet[] = {
387 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
388 0x00, 0x00, 0x00, 0x00,
389 0x00, 0x00, 0x00, 0x00,
391 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
393 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
394 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
407 0x50, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, /* 2 bytes for 4 byte alignment */
413 /* C-tag (802.1Q): IPv6 + TCP */
414 static const struct ice_dummy_pkt_offsets
415 dummy_vlan_tcp_ipv6_packet_offsets[] = {
417 { ICE_VLAN_OFOS, 12 },
418 { ICE_ETYPE_OL, 16 },
419 { ICE_IPV6_OFOS, 18 },
421 { ICE_PROTOCOL_LAST, 0 },
424 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
425 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
426 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427 0x00, 0x00, 0x00, 0x00,
428 0x00, 0x00, 0x00, 0x00,
430 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
432 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
434 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
435 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
446 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
448 0x50, 0x00, 0x00, 0x00,
449 0x00, 0x00, 0x00, 0x00,
451 0x00, 0x00, /* 2 bytes for 4 byte alignment */
455 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
457 { ICE_ETYPE_OL, 12 },
458 { ICE_IPV6_OFOS, 14 },
459 { ICE_UDP_ILOS, 54 },
460 { ICE_PROTOCOL_LAST, 0 },
463 /* IPv6 + UDP dummy packet */
464 static const u8 dummy_udp_ipv6_packet[] = {
465 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
466 0x00, 0x00, 0x00, 0x00,
467 0x00, 0x00, 0x00, 0x00,
469 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
472 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
483 0x00, 0x10, 0x00, 0x00,
485 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
486 0x00, 0x00, 0x00, 0x00,
488 0x00, 0x00, /* 2 bytes for 4 byte alignment */
491 /* C-tag (802.1Q): IPv6 + UDP */
492 static const struct ice_dummy_pkt_offsets
493 dummy_vlan_udp_ipv6_packet_offsets[] = {
495 { ICE_VLAN_OFOS, 12 },
496 { ICE_ETYPE_OL, 16 },
497 { ICE_IPV6_OFOS, 18 },
498 { ICE_UDP_ILOS, 58 },
499 { ICE_PROTOCOL_LAST, 0 },
502 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
503 static const u8 dummy_vlan_udp_ipv6_packet[] = {
504 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
505 0x00, 0x00, 0x00, 0x00,
506 0x00, 0x00, 0x00, 0x00,
508 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
510 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
512 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
513 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
524 0x00, 0x08, 0x00, 0x00,
526 0x00, 0x00, /* 2 bytes for 4 byte alignment */
529 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
530 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
532 { ICE_IPV4_OFOS, 14 },
537 { ICE_PROTOCOL_LAST, 0 },
540 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
541 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
546 0x45, 0x00, 0x00, 0x58, /* IP 14 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x11, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
553 0x00, 0x44, 0x00, 0x00,
555 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x85,
559 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
560 0x00, 0x00, 0x00, 0x00,
562 0x45, 0x00, 0x00, 0x28, /* IP 62 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x06, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x00,
566 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
569 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00,
571 0x50, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, /* 2 bytes for 4 byte alignment */
577 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
578 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
580 { ICE_IPV4_OFOS, 14 },
584 { ICE_UDP_ILOS, 82 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
595 0x00, 0x00, 0x00, 0x00,
596 0x00, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
601 0x00, 0x38, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
608 0x00, 0x00, 0x00, 0x00,
610 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
611 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x11, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
617 0x00, 0x08, 0x00, 0x00,
619 0x00, 0x00, /* 2 bytes for 4 byte alignment */
622 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
623 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
625 { ICE_IPV4_OFOS, 14 },
630 { ICE_PROTOCOL_LAST, 0 },
633 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
634 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
635 0x00, 0x00, 0x00, 0x00,
636 0x00, 0x00, 0x00, 0x00,
639 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x11, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
646 0x00, 0x58, 0x00, 0x00,
648 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
649 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x85,
652 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
653 0x00, 0x00, 0x00, 0x00,
655 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
656 0x00, 0x14, 0x06, 0x00,
657 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x00, 0x00,
659 0x00, 0x00, 0x00, 0x00,
660 0x00, 0x00, 0x00, 0x00,
661 0x00, 0x00, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00,
669 0x50, 0x00, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, /* 2 bytes for 4 byte alignment */
675 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
677 { ICE_IPV4_OFOS, 14 },
681 { ICE_UDP_ILOS, 102 },
682 { ICE_PROTOCOL_LAST, 0 },
685 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
686 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
687 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00,
691 0x45, 0x00, 0x00, 0x60, /* IP 14 */
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x11, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
698 0x00, 0x4c, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
708 0x00, 0x08, 0x11, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
718 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
719 0x00, 0x08, 0x00, 0x00,
721 0x00, 0x00, /* 2 bytes for 4 byte alignment */
724 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
726 { ICE_IPV6_OFOS, 14 },
731 { ICE_PROTOCOL_LAST, 0 },
734 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
735 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
736 0x00, 0x00, 0x00, 0x00,
737 0x00, 0x00, 0x00, 0x00,
740 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
741 0x00, 0x44, 0x11, 0x00,
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
752 0x00, 0x44, 0x00, 0x00,
754 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
755 0x00, 0x00, 0x00, 0x00,
756 0x00, 0x00, 0x00, 0x85,
758 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
759 0x00, 0x00, 0x00, 0x00,
761 0x45, 0x00, 0x00, 0x28, /* IP 82 */
762 0x00, 0x00, 0x00, 0x00,
763 0x00, 0x06, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x50, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, /* 2 bytes for 4 byte alignment */
776 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
778 { ICE_IPV6_OFOS, 14 },
782 { ICE_UDP_ILOS, 102 },
783 { ICE_PROTOCOL_LAST, 0 },
786 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
787 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
788 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00,
792 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
793 0x00, 0x38, 0x11, 0x00,
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
804 0x00, 0x38, 0x00, 0x00,
806 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x85,
810 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
811 0x00, 0x00, 0x00, 0x00,
813 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
814 0x00, 0x00, 0x00, 0x00,
815 0x00, 0x11, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
820 0x00, 0x08, 0x00, 0x00,
822 0x00, 0x00, /* 2 bytes for 4 byte alignment */
825 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
827 { ICE_IPV6_OFOS, 14 },
832 { ICE_PROTOCOL_LAST, 0 },
835 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
836 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
837 0x00, 0x00, 0x00, 0x00,
838 0x00, 0x00, 0x00, 0x00,
841 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
842 0x00, 0x58, 0x11, 0x00,
843 0x00, 0x00, 0x00, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
853 0x00, 0x58, 0x00, 0x00,
855 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
856 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x00, 0x85,
859 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
860 0x00, 0x00, 0x00, 0x00,
862 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
863 0x00, 0x14, 0x06, 0x00,
864 0x00, 0x00, 0x00, 0x00,
865 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
869 0x00, 0x00, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x50, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, /* 2 bytes for 4 byte alignment */
882 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
884 { ICE_IPV6_OFOS, 14 },
888 { ICE_UDP_ILOS, 122 },
889 { ICE_PROTOCOL_LAST, 0 },
892 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
893 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 0x00, 0x00, 0x00, 0x00,
895 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 0x00, 0x4c, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 0x00, 0x4c, 0x00, 0x00,
912 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
913 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x85,
916 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 0x00, 0x00, 0x00, 0x00,
919 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
920 0x00, 0x08, 0x11, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
928 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
931 0x00, 0x08, 0x00, 0x00,
933 0x00, 0x00, /* 2 bytes for 4 byte alignment */
936 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
938 { ICE_IPV4_OFOS, 14 },
942 { ICE_PROTOCOL_LAST, 0 },
945 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
946 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
947 0x00, 0x00, 0x00, 0x00,
948 0x00, 0x00, 0x00, 0x00,
951 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
952 0x00, 0x00, 0x40, 0x00,
953 0x40, 0x11, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
957 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
958 0x00, 0x00, 0x00, 0x00,
960 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
961 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x85,
964 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
965 0x00, 0x00, 0x00, 0x00,
967 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
968 0x00, 0x00, 0x40, 0x00,
969 0x40, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
976 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
978 { ICE_IPV4_OFOS, 14 },
982 { ICE_PROTOCOL_LAST, 0 },
985 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
986 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
987 0x00, 0x00, 0x00, 0x00,
988 0x00, 0x00, 0x00, 0x00,
991 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
992 0x00, 0x00, 0x40, 0x00,
993 0x40, 0x11, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
997 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
998 0x00, 0x00, 0x00, 0x00,
1000 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1001 0x00, 0x00, 0x00, 0x00,
1002 0x00, 0x00, 0x00, 0x85,
1004 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1005 0x00, 0x00, 0x00, 0x00,
1007 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1008 0x00, 0x00, 0x3b, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1015 0x00, 0x00, 0x00, 0x00,
1016 0x00, 0x00, 0x00, 0x00,
1022 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1023 { ICE_MAC_OFOS, 0 },
1024 { ICE_IPV6_OFOS, 14 },
1027 { ICE_IPV4_IL, 82 },
1028 { ICE_PROTOCOL_LAST, 0 },
1031 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1032 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1033 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00,
1037 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1038 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1039 0x00, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00,
1046 0x00, 0x00, 0x00, 0x00,
1048 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1049 0x00, 0x00, 0x00, 0x00,
1051 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x85,
1055 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1056 0x00, 0x00, 0x00, 0x00,
1058 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1059 0x00, 0x00, 0x40, 0x00,
1060 0x40, 0x00, 0x00, 0x00,
1061 0x00, 0x00, 0x00, 0x00,
1062 0x00, 0x00, 0x00, 0x00,
1068 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1069 { ICE_MAC_OFOS, 0 },
1070 { ICE_IPV6_OFOS, 14 },
1073 { ICE_IPV6_IL, 82 },
1074 { ICE_PROTOCOL_LAST, 0 },
1077 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1078 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1083 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1084 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1085 0x00, 0x00, 0x00, 0x00,
1086 0x00, 0x00, 0x00, 0x00,
1087 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00,
1089 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1092 0x00, 0x00, 0x00, 0x00,
1094 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1095 0x00, 0x00, 0x00, 0x00,
1097 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x85,
1101 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1102 0x00, 0x00, 0x00, 0x00,
1104 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1105 0x00, 0x00, 0x3b, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1107 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x00,
1110 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, 0x00, 0x00,
1118 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1119 { ICE_MAC_OFOS, 0 },
1120 { ICE_IPV4_OFOS, 14 },
1123 { ICE_PROTOCOL_LAST, 0 },
1126 static const u8 dummy_udp_gtp_packet[] = {
1127 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1128 0x00, 0x00, 0x00, 0x00,
1129 0x00, 0x00, 0x00, 0x00,
1132 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x11, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1139 0x00, 0x1c, 0x00, 0x00,
1141 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1142 0x00, 0x00, 0x00, 0x00,
1143 0x00, 0x00, 0x00, 0x85,
1145 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1146 0x00, 0x00, 0x00, 0x00,
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_GTP_NO_PAY, 42 },
1155 { ICE_PROTOCOL_LAST, 0 },
1159 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1160 { ICE_MAC_OFOS, 0 },
1161 { ICE_IPV6_OFOS, 14 },
1163 { ICE_GTP_NO_PAY, 62 },
1164 { ICE_PROTOCOL_LAST, 0 },
1167 static const u8 dummy_ipv6_gtp_packet[] = {
1168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1169 0x00, 0x00, 0x00, 0x00,
1170 0x00, 0x00, 0x00, 0x00,
1173 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1174 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1175 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00,
1177 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00,
1179 0x00, 0x00, 0x00, 0x00,
1180 0x00, 0x00, 0x00, 0x00,
1181 0x00, 0x00, 0x00, 0x00,
1182 0x00, 0x00, 0x00, 0x00,
1184 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1185 0x00, 0x00, 0x00, 0x00,
1187 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1188 0x00, 0x00, 0x00, 0x00,
1193 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1194 { ICE_MAC_OFOS, 0 },
1195 { ICE_VLAN_OFOS, 12 },
1196 { ICE_ETYPE_OL, 16 },
1198 { ICE_PROTOCOL_LAST, 0 },
1201 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1202 { ICE_MAC_OFOS, 0 },
1203 { ICE_VLAN_OFOS, 12 },
1204 { ICE_ETYPE_OL, 16 },
1206 { ICE_IPV4_OFOS, 26 },
1207 { ICE_PROTOCOL_LAST, 0 },
1210 static const u8 dummy_pppoe_ipv4_packet[] = {
1211 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1212 0x00, 0x00, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1215 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1217 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1219 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1222 0x00, 0x21, /* PPP Link Layer 24 */
1224 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1225 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00,
1227 0x00, 0x00, 0x00, 0x00,
1228 0x00, 0x00, 0x00, 0x00,
1230 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1234 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1235 { ICE_MAC_OFOS, 0 },
1236 { ICE_VLAN_OFOS, 12 },
1237 { ICE_ETYPE_OL, 16 },
1239 { ICE_IPV4_OFOS, 26 },
1241 { ICE_PROTOCOL_LAST, 0 },
1244 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1245 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1246 0x00, 0x00, 0x00, 0x00,
1247 0x00, 0x00, 0x00, 0x00,
1249 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1251 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1253 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1256 0x00, 0x21, /* PPP Link Layer 24 */
1258 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1259 0x00, 0x01, 0x00, 0x00,
1260 0x00, 0x06, 0x00, 0x00,
1261 0x00, 0x00, 0x00, 0x00,
1262 0x00, 0x00, 0x00, 0x00,
1264 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1265 0x00, 0x00, 0x00, 0x00,
1266 0x00, 0x00, 0x00, 0x00,
1267 0x50, 0x00, 0x00, 0x00,
1268 0x00, 0x00, 0x00, 0x00,
1270 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1274 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1275 { ICE_MAC_OFOS, 0 },
1276 { ICE_VLAN_OFOS, 12 },
1277 { ICE_ETYPE_OL, 16 },
1279 { ICE_IPV4_OFOS, 26 },
1280 { ICE_UDP_ILOS, 46 },
1281 { ICE_PROTOCOL_LAST, 0 },
1284 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1285 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1286 0x00, 0x00, 0x00, 0x00,
1287 0x00, 0x00, 0x00, 0x00,
1289 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1291 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1293 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1296 0x00, 0x21, /* PPP Link Layer 24 */
1298 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1299 0x00, 0x01, 0x00, 0x00,
1300 0x00, 0x11, 0x00, 0x00,
1301 0x00, 0x00, 0x00, 0x00,
1302 0x00, 0x00, 0x00, 0x00,
1304 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1305 0x00, 0x08, 0x00, 0x00,
1307 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1310 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1311 { ICE_MAC_OFOS, 0 },
1312 { ICE_VLAN_OFOS, 12 },
1313 { ICE_ETYPE_OL, 16 },
1315 { ICE_IPV6_OFOS, 26 },
1316 { ICE_PROTOCOL_LAST, 0 },
1319 static const u8 dummy_pppoe_ipv6_packet[] = {
1320 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1321 0x00, 0x00, 0x00, 0x00,
1322 0x00, 0x00, 0x00, 0x00,
1324 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1326 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1328 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1331 0x00, 0x57, /* PPP Link Layer 24 */
1333 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1334 0x00, 0x00, 0x3b, 0x00,
1335 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00,
1337 0x00, 0x00, 0x00, 0x00,
1338 0x00, 0x00, 0x00, 0x00,
1339 0x00, 0x00, 0x00, 0x00,
1340 0x00, 0x00, 0x00, 0x00,
1341 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1348 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1349 { ICE_MAC_OFOS, 0 },
1350 { ICE_VLAN_OFOS, 12 },
1351 { ICE_ETYPE_OL, 16 },
1353 { ICE_IPV6_OFOS, 26 },
1355 { ICE_PROTOCOL_LAST, 0 },
1358 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1359 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1360 0x00, 0x00, 0x00, 0x00,
1361 0x00, 0x00, 0x00, 0x00,
1363 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1365 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1367 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1370 0x00, 0x57, /* PPP Link Layer 24 */
1372 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1373 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1374 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00,
1377 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00,
1379 0x00, 0x00, 0x00, 0x00,
1380 0x00, 0x00, 0x00, 0x00,
1381 0x00, 0x00, 0x00, 0x00,
1383 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1384 0x00, 0x00, 0x00, 0x00,
1385 0x00, 0x00, 0x00, 0x00,
1386 0x50, 0x00, 0x00, 0x00,
1387 0x00, 0x00, 0x00, 0x00,
1389 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1393 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1394 { ICE_MAC_OFOS, 0 },
1395 { ICE_VLAN_OFOS, 12 },
1396 { ICE_ETYPE_OL, 16 },
1398 { ICE_IPV6_OFOS, 26 },
1399 { ICE_UDP_ILOS, 66 },
1400 { ICE_PROTOCOL_LAST, 0 },
1403 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1405 0x00, 0x00, 0x00, 0x00,
1406 0x00, 0x00, 0x00, 0x00,
1408 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1410 0x88, 0x64, /* ICE_ETYPE_OL 16 */
1412 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1415 0x00, 0x57, /* PPP Link Layer 24 */
1417 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1418 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1419 0x00, 0x00, 0x00, 0x00,
1420 0x00, 0x00, 0x00, 0x00,
1421 0x00, 0x00, 0x00, 0x00,
1422 0x00, 0x00, 0x00, 0x00,
1423 0x00, 0x00, 0x00, 0x00,
1424 0x00, 0x00, 0x00, 0x00,
1425 0x00, 0x00, 0x00, 0x00,
1426 0x00, 0x00, 0x00, 0x00,
1428 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1429 0x00, 0x08, 0x00, 0x00,
1431 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1434 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1435 { ICE_MAC_OFOS, 0 },
1436 { ICE_IPV4_OFOS, 14 },
1438 { ICE_PROTOCOL_LAST, 0 },
1441 static const u8 dummy_ipv4_esp_pkt[] = {
1442 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1443 0x00, 0x00, 0x00, 0x00,
1444 0x00, 0x00, 0x00, 0x00,
1447 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1448 0x00, 0x00, 0x40, 0x00,
1449 0x40, 0x32, 0x00, 0x00,
1450 0x00, 0x00, 0x00, 0x00,
1451 0x00, 0x00, 0x00, 0x00,
1453 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1454 0x00, 0x00, 0x00, 0x00,
1455 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1458 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1459 { ICE_MAC_OFOS, 0 },
1460 { ICE_IPV6_OFOS, 14 },
1462 { ICE_PROTOCOL_LAST, 0 },
1465 static const u8 dummy_ipv6_esp_pkt[] = {
1466 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1467 0x00, 0x00, 0x00, 0x00,
1468 0x00, 0x00, 0x00, 0x00,
1471 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1472 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1473 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00,
1477 0x00, 0x00, 0x00, 0x00,
1478 0x00, 0x00, 0x00, 0x00,
1479 0x00, 0x00, 0x00, 0x00,
1480 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1483 0x00, 0x00, 0x00, 0x00,
1484 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1487 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1488 { ICE_MAC_OFOS, 0 },
1489 { ICE_IPV4_OFOS, 14 },
1491 { ICE_PROTOCOL_LAST, 0 },
1494 static const u8 dummy_ipv4_ah_pkt[] = {
1495 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1496 0x00, 0x00, 0x00, 0x00,
1497 0x00, 0x00, 0x00, 0x00,
1500 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1501 0x00, 0x00, 0x40, 0x00,
1502 0x40, 0x33, 0x00, 0x00,
1503 0x00, 0x00, 0x00, 0x00,
1504 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1507 0x00, 0x00, 0x00, 0x00,
1508 0x00, 0x00, 0x00, 0x00,
1509 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1512 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1513 { ICE_MAC_OFOS, 0 },
1514 { ICE_IPV6_OFOS, 14 },
1516 { ICE_PROTOCOL_LAST, 0 },
1519 static const u8 dummy_ipv6_ah_pkt[] = {
1520 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1521 0x00, 0x00, 0x00, 0x00,
1522 0x00, 0x00, 0x00, 0x00,
1525 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1526 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1527 0x00, 0x00, 0x00, 0x00,
1528 0x00, 0x00, 0x00, 0x00,
1529 0x00, 0x00, 0x00, 0x00,
1530 0x00, 0x00, 0x00, 0x00,
1531 0x00, 0x00, 0x00, 0x00,
1532 0x00, 0x00, 0x00, 0x00,
1533 0x00, 0x00, 0x00, 0x00,
1534 0x00, 0x00, 0x00, 0x00,
1536 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1537 0x00, 0x00, 0x00, 0x00,
1538 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1542 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1543 { ICE_MAC_OFOS, 0 },
1544 { ICE_IPV4_OFOS, 14 },
1545 { ICE_UDP_ILOS, 34 },
1547 { ICE_PROTOCOL_LAST, 0 },
1550 static const u8 dummy_ipv4_nat_pkt[] = {
1551 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1552 0x00, 0x00, 0x00, 0x00,
1553 0x00, 0x00, 0x00, 0x00,
1556 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1557 0x00, 0x00, 0x40, 0x00,
1558 0x40, 0x11, 0x00, 0x00,
1559 0x00, 0x00, 0x00, 0x00,
1560 0x00, 0x00, 0x00, 0x00,
1562 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1563 0x00, 0x00, 0x00, 0x00,
1565 0x00, 0x00, 0x00, 0x00,
1566 0x00, 0x00, 0x00, 0x00,
1567 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1570 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1571 { ICE_MAC_OFOS, 0 },
1572 { ICE_IPV6_OFOS, 14 },
1573 { ICE_UDP_ILOS, 54 },
1575 { ICE_PROTOCOL_LAST, 0 },
1578 static const u8 dummy_ipv6_nat_pkt[] = {
1579 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1580 0x00, 0x00, 0x00, 0x00,
1581 0x00, 0x00, 0x00, 0x00,
1584 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1585 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1586 0x00, 0x00, 0x00, 0x00,
1587 0x00, 0x00, 0x00, 0x00,
1588 0x00, 0x00, 0x00, 0x00,
1589 0x00, 0x00, 0x00, 0x00,
1590 0x00, 0x00, 0x00, 0x00,
1591 0x00, 0x00, 0x00, 0x00,
1592 0x00, 0x00, 0x00, 0x00,
1593 0x00, 0x00, 0x00, 0x00,
1595 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1596 0x00, 0x00, 0x00, 0x00,
1598 0x00, 0x00, 0x00, 0x00,
1599 0x00, 0x00, 0x00, 0x00,
1600 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1604 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1605 { ICE_MAC_OFOS, 0 },
1606 { ICE_IPV4_OFOS, 14 },
1608 { ICE_PROTOCOL_LAST, 0 },
1611 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1612 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1613 0x00, 0x00, 0x00, 0x00,
1614 0x00, 0x00, 0x00, 0x00,
1617 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1618 0x00, 0x00, 0x40, 0x00,
1619 0x40, 0x73, 0x00, 0x00,
1620 0x00, 0x00, 0x00, 0x00,
1621 0x00, 0x00, 0x00, 0x00,
1623 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1624 0x00, 0x00, 0x00, 0x00,
1625 0x00, 0x00, 0x00, 0x00,
1626 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1629 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1630 { ICE_MAC_OFOS, 0 },
1631 { ICE_IPV6_OFOS, 14 },
1633 { ICE_PROTOCOL_LAST, 0 },
1636 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1637 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1638 0x00, 0x00, 0x00, 0x00,
1639 0x00, 0x00, 0x00, 0x00,
1642 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1643 0x00, 0x0c, 0x73, 0x40,
1644 0x00, 0x00, 0x00, 0x00,
1645 0x00, 0x00, 0x00, 0x00,
1646 0x00, 0x00, 0x00, 0x00,
1647 0x00, 0x00, 0x00, 0x00,
1648 0x00, 0x00, 0x00, 0x00,
1649 0x00, 0x00, 0x00, 0x00,
1650 0x00, 0x00, 0x00, 0x00,
1651 0x00, 0x00, 0x00, 0x00,
1653 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1654 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1659 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1660 { ICE_MAC_OFOS, 0 },
1661 { ICE_VLAN_EX, 12 },
1662 { ICE_VLAN_IN, 16 },
1663 { ICE_ETYPE_OL, 20 },
1664 { ICE_IPV4_OFOS, 22 },
1665 { ICE_PROTOCOL_LAST, 0 },
1668 static const u8 dummy_qinq_ipv4_pkt[] = {
1669 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1670 0x00, 0x00, 0x00, 0x00,
1671 0x00, 0x00, 0x00, 0x00,
1673 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1674 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1675 0x08, 0x00, /* ICE_ETYPE_OL 20 */
1677 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1678 0x00, 0x01, 0x00, 0x00,
1679 0x00, 0x11, 0x00, 0x00,
1680 0x00, 0x00, 0x00, 0x00,
1681 0x00, 0x00, 0x00, 0x00,
1683 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1684 0x00, 0x08, 0x00, 0x00,
1686 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1689 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1690 { ICE_MAC_OFOS, 0 },
1691 { ICE_VLAN_EX, 12 },
1692 { ICE_VLAN_IN, 16 },
1693 { ICE_ETYPE_OL, 20 },
1694 { ICE_IPV6_OFOS, 22 },
1695 { ICE_PROTOCOL_LAST, 0 },
1698 static const u8 dummy_qinq_ipv6_pkt[] = {
1699 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1700 0x00, 0x00, 0x00, 0x00,
1701 0x00, 0x00, 0x00, 0x00,
1703 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1704 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1705 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
1707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1708 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1709 0x00, 0x00, 0x00, 0x00,
1710 0x00, 0x00, 0x00, 0x00,
1711 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00,
1713 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00,
1715 0x00, 0x00, 0x00, 0x00,
1716 0x00, 0x00, 0x00, 0x00,
1718 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1719 0x00, 0x10, 0x00, 0x00,
1721 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1722 0x00, 0x00, 0x00, 0x00,
1724 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1727 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1728 { ICE_MAC_OFOS, 0 },
1729 { ICE_VLAN_EX, 12 },
1730 { ICE_VLAN_IN, 16 },
1731 { ICE_ETYPE_OL, 20 },
1733 { ICE_PROTOCOL_LAST, 0 },
1737 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1738 { ICE_MAC_OFOS, 0 },
1739 { ICE_VLAN_EX, 12 },
1740 { ICE_VLAN_IN, 16 },
1741 { ICE_ETYPE_OL, 20 },
1743 { ICE_IPV4_OFOS, 30 },
1744 { ICE_PROTOCOL_LAST, 0 },
1747 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1748 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1749 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00,
1752 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1753 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1754 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1756 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1759 0x00, 0x21, /* PPP Link Layer 28 */
1761 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
1762 0x00, 0x00, 0x00, 0x00,
1763 0x00, 0x00, 0x00, 0x00,
1764 0x00, 0x00, 0x00, 0x00,
1765 0x00, 0x00, 0x00, 0x00,
1767 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1771 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1772 { ICE_MAC_OFOS, 0 },
1773 { ICE_VLAN_EX, 12 },
1774 { ICE_VLAN_IN, 16 },
1775 { ICE_ETYPE_OL, 20 },
1777 { ICE_IPV6_OFOS, 30 },
1778 { ICE_PROTOCOL_LAST, 0 },
1781 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1782 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1783 0x00, 0x00, 0x00, 0x00,
1784 0x00, 0x00, 0x00, 0x00,
1786 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
1787 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1788 0x88, 0x64, /* ICE_ETYPE_OL 20 */
1790 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1793 0x00, 0x57, /* PPP Link Layer 28*/
1795 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1796 0x00, 0x00, 0x3b, 0x00,
1797 0x00, 0x00, 0x00, 0x00,
1798 0x00, 0x00, 0x00, 0x00,
1799 0x00, 0x00, 0x00, 0x00,
1800 0x00, 0x00, 0x00, 0x00,
1801 0x00, 0x00, 0x00, 0x00,
1802 0x00, 0x00, 0x00, 0x00,
1803 0x00, 0x00, 0x00, 0x00,
1804 0x00, 0x00, 0x00, 0x00,
1806 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1809 /* this is a recipe to profile association bitmap */
1810 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1811 ICE_MAX_NUM_PROFILES);
1813 /* this is a profile to recipe association bitmap */
1814 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1815 ICE_MAX_NUM_RECIPES);
1817 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1820 * ice_collect_result_idx - copy result index values
1821 * @buf: buffer that contains the result index
1822 * @recp: the recipe struct to copy data into
1824 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1825 struct ice_sw_recipe *recp)
1827 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1828 ice_set_bit(buf->content.result_indx &
1829 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1832 static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
1833 { ICE_PROFID_IPV4_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV4},
1834 { ICE_PROFID_IPV4_GTPU_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
1835 { ICE_PROFID_IPV4_GTPU_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
1836 { ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
1837 { ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
1838 { ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
1839 { ICE_PROFID_IPV4_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV6},
1840 { ICE_PROFID_IPV4_GTPU_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
1841 { ICE_PROFID_IPV4_GTPU_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
1842 { ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
1843 { ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
1844 { ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
1845 { ICE_PROFID_IPV6_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV4},
1846 { ICE_PROFID_IPV6_GTPU_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
1847 { ICE_PROFID_IPV6_GTPU_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
1848 { ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
1849 { ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
1850 { ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
1851 { ICE_PROFID_IPV6_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV6},
1852 { ICE_PROFID_IPV6_GTPU_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
1853 { ICE_PROFID_IPV6_GTPU_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
1854 { ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
1855 { ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
1856 { ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
1860 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1861 * @rid: recipe ID that we are populating
1863 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1865 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1866 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1867 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1868 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1869 enum ice_sw_tunnel_type tun_type;
1870 u16 i, j, k, profile_num = 0;
1871 bool non_tun_valid = false;
1872 bool pppoe_valid = false;
1873 bool vxlan_valid = false;
1874 bool gre_valid = false;
1875 bool gtp_valid = false;
1876 bool flag_valid = false;
1878 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1879 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1884 for (i = 0; i < 12; i++) {
1885 if (gre_profile[i] == j)
1889 for (i = 0; i < 12; i++) {
1890 if (vxlan_profile[i] == j)
1894 for (i = 0; i < 7; i++) {
1895 if (pppoe_profile[i] == j)
1899 for (i = 0; i < 6; i++) {
1900 if (non_tun_profile[i] == j)
1901 non_tun_valid = true;
1904 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1905 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1908 if ((j >= ICE_PROFID_IPV4_ESP &&
1909 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1910 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1911 j <= ICE_PROFID_IPV6_GTPU_TEID))
1915 if (!non_tun_valid && vxlan_valid)
1916 tun_type = ICE_SW_TUN_VXLAN;
1917 else if (!non_tun_valid && gre_valid)
1918 tun_type = ICE_SW_TUN_NVGRE;
1919 else if (!non_tun_valid && pppoe_valid)
1920 tun_type = ICE_SW_TUN_PPPOE;
1921 else if (!non_tun_valid && gtp_valid)
1922 tun_type = ICE_SW_TUN_GTP;
1923 else if (non_tun_valid &&
1924 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1925 tun_type = ICE_SW_TUN_AND_NON_TUN;
1926 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1928 tun_type = ICE_NON_TUN;
1930 tun_type = ICE_NON_TUN;
1932 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1933 i = ice_is_bit_set(recipe_to_profile[rid],
1934 ICE_PROFID_PPPOE_IPV4_OTHER);
1935 j = ice_is_bit_set(recipe_to_profile[rid],
1936 ICE_PROFID_PPPOE_IPV6_OTHER);
1938 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1940 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1943 if (tun_type == ICE_SW_TUN_GTP) {
1944 for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
1945 if (ice_is_bit_set(recipe_to_profile[rid],
1946 ice_prof_type_tbl[k].prof_id)) {
1947 tun_type = ice_prof_type_tbl[k].type;
1952 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1953 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1954 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1956 case ICE_PROFID_IPV4_TCP:
1957 tun_type = ICE_SW_IPV4_TCP;
1959 case ICE_PROFID_IPV4_UDP:
1960 tun_type = ICE_SW_IPV4_UDP;
1962 case ICE_PROFID_IPV6_TCP:
1963 tun_type = ICE_SW_IPV6_TCP;
1965 case ICE_PROFID_IPV6_UDP:
1966 tun_type = ICE_SW_IPV6_UDP;
1968 case ICE_PROFID_PPPOE_PAY:
1969 tun_type = ICE_SW_TUN_PPPOE_PAY;
1971 case ICE_PROFID_PPPOE_IPV4_TCP:
1972 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1974 case ICE_PROFID_PPPOE_IPV4_UDP:
1975 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1977 case ICE_PROFID_PPPOE_IPV4_OTHER:
1978 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1980 case ICE_PROFID_PPPOE_IPV6_TCP:
1981 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1983 case ICE_PROFID_PPPOE_IPV6_UDP:
1984 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1986 case ICE_PROFID_PPPOE_IPV6_OTHER:
1987 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1989 case ICE_PROFID_IPV4_ESP:
1990 tun_type = ICE_SW_TUN_IPV4_ESP;
1992 case ICE_PROFID_IPV6_ESP:
1993 tun_type = ICE_SW_TUN_IPV6_ESP;
1995 case ICE_PROFID_IPV4_AH:
1996 tun_type = ICE_SW_TUN_IPV4_AH;
1998 case ICE_PROFID_IPV6_AH:
1999 tun_type = ICE_SW_TUN_IPV6_AH;
2001 case ICE_PROFID_IPV4_NAT_T:
2002 tun_type = ICE_SW_TUN_IPV4_NAT_T;
2004 case ICE_PROFID_IPV6_NAT_T:
2005 tun_type = ICE_SW_TUN_IPV6_NAT_T;
2007 case ICE_PROFID_IPV4_PFCP_NODE:
2009 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
2011 case ICE_PROFID_IPV6_PFCP_NODE:
2013 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
2015 case ICE_PROFID_IPV4_PFCP_SESSION:
2017 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2019 case ICE_PROFID_IPV6_PFCP_SESSION:
2021 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2023 case ICE_PROFID_MAC_IPV4_L2TPV3:
2024 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2026 case ICE_PROFID_MAC_IPV6_L2TPV3:
2027 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2029 case ICE_PROFID_IPV4_GTPU_TEID:
2030 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2032 case ICE_PROFID_IPV6_GTPU_TEID:
2033 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2044 if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2045 tun_type = ICE_SW_TUN_PPPOE_QINQ;
2046 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2047 tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2048 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2049 tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2050 else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2051 tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2052 else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2053 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2054 else if (vlan && tun_type == ICE_NON_TUN)
2055 tun_type = ICE_NON_TUN_QINQ;
2061 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2062 * @hw: pointer to hardware structure
2063 * @recps: struct that we need to populate
2064 * @rid: recipe ID that we are populating
2065 * @refresh_required: true if we should get recipe to profile mapping from FW
2067 * This function is used to populate all the necessary entries into our
2068 * bookkeeping so that we have a current list of all the recipes that are
2069 * programmed in the firmware.
2071 static enum ice_status
2072 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2073 bool *refresh_required)
2075 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2076 struct ice_aqc_recipe_data_elem *tmp;
2077 u16 num_recps = ICE_MAX_NUM_RECIPES;
2078 struct ice_prot_lkup_ext *lkup_exts;
2079 enum ice_status status;
2084 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2086 /* we need a buffer big enough to accommodate all the recipes */
2087 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2088 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2090 return ICE_ERR_NO_MEMORY;
2092 tmp[0].recipe_indx = rid;
2093 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2094 /* non-zero status meaning recipe doesn't exist */
2098 /* Get recipe to profile map so that we can get the fv from lkups that
2099 * we read for a recipe from FW. Since we want to minimize the number of
2100 * times we make this FW call, just make one call and cache the copy
2101 * until a new recipe is added. This operation is only required the
2102 * first time to get the changes from FW. Then to search existing
2103 * entries we don't need to update the cache again until another recipe
2106 if (*refresh_required) {
2107 ice_get_recp_to_prof_map(hw);
2108 *refresh_required = false;
2111 /* Start populating all the entries for recps[rid] based on lkups from
2112 * firmware. Note that we are only creating the root recipe in our
2115 lkup_exts = &recps[rid].lkup_exts;
2117 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2118 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2119 struct ice_recp_grp_entry *rg_entry;
2120 u8 i, prof, idx, prot = 0;
2124 rg_entry = (struct ice_recp_grp_entry *)
2125 ice_malloc(hw, sizeof(*rg_entry));
2127 status = ICE_ERR_NO_MEMORY;
2131 idx = root_bufs.recipe_indx;
2132 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2134 /* Mark all result indices in this chain */
2135 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2136 ice_set_bit(root_bufs.content.result_indx &
2137 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2139 /* get the first profile that is associated with rid */
2140 prof = ice_find_first_bit(recipe_to_profile[idx],
2141 ICE_MAX_NUM_PROFILES);
2142 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2143 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2145 rg_entry->fv_idx[i] = lkup_indx;
2146 rg_entry->fv_mask[i] =
2147 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2149 /* If the recipe is a chained recipe then all its
2150 * child recipe's result will have a result index.
2151 * To fill fv_words we should not use those result
2152 * index, we only need the protocol ids and offsets.
2153 * We will skip all the fv_idx which stores result
2154 * index in them. We also need to skip any fv_idx which
2155 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2156 * valid offset value.
2158 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2159 rg_entry->fv_idx[i]) ||
2160 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2161 rg_entry->fv_idx[i] == 0)
2164 ice_find_prot_off(hw, ICE_BLK_SW, prof,
2165 rg_entry->fv_idx[i], &prot, &off);
2166 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2167 lkup_exts->fv_words[fv_word_idx].off = off;
2168 lkup_exts->field_mask[fv_word_idx] =
2169 rg_entry->fv_mask[i];
2170 if (prot == ICE_META_DATA_ID_HW &&
2171 off == ICE_TUN_FLAG_MDID_OFF)
2175 /* populate rg_list with the data from the child entry of this
2178 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2180 /* Propagate some data to the recipe database */
2181 recps[idx].is_root = !!is_root;
2182 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2183 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2184 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2185 recps[idx].chain_idx = root_bufs.content.result_indx &
2186 ~ICE_AQ_RECIPE_RESULT_EN;
2187 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2189 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2195 /* Only do the following for root recipes entries */
2196 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2197 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2198 recps[idx].root_rid = root_bufs.content.rid &
2199 ~ICE_AQ_RECIPE_ID_IS_ROOT;
2200 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2203 /* Complete initialization of the root recipe entry */
2204 lkup_exts->n_val_words = fv_word_idx;
2205 recps[rid].big_recp = (num_recps > 1);
2206 recps[rid].n_grp_count = (u8)num_recps;
2207 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2208 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2209 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2210 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2211 if (!recps[rid].root_buf)
2214 /* Copy result indexes */
2215 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2216 recps[rid].recp_created = true;
2224 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2225 * @hw: pointer to hardware structure
2227 * This function is used to populate recipe_to_profile matrix where index to
2228 * this array is the recipe ID and the element is the mapping of which profiles
2229 * is this recipe mapped to.
2231 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2233 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2236 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2239 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2240 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2241 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2243 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2244 ICE_MAX_NUM_RECIPES);
2245 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2246 ice_set_bit(i, recipe_to_profile[j]);
2251 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2252 * @hw: pointer to the HW struct
2253 * @recp_list: pointer to sw recipe list
2255 * Allocate memory for the entire recipe table and initialize the structures/
2256 * entries corresponding to basic recipes.
2259 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2261 struct ice_sw_recipe *recps;
2264 recps = (struct ice_sw_recipe *)
2265 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2267 return ICE_ERR_NO_MEMORY;
2269 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2270 recps[i].root_rid = i;
2271 INIT_LIST_HEAD(&recps[i].filt_rules);
2272 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2273 INIT_LIST_HEAD(&recps[i].rg_list);
2274 ice_init_lock(&recps[i].filt_rule_lock);
2283 * ice_aq_get_sw_cfg - get switch configuration
2284 * @hw: pointer to the hardware structure
2285 * @buf: pointer to the result buffer
2286 * @buf_size: length of the buffer available for response
2287 * @req_desc: pointer to requested descriptor
2288 * @num_elems: pointer to number of elements
2289 * @cd: pointer to command details structure or NULL
2291 * Get switch configuration (0x0200) to be placed in buf.
2292 * This admin command returns information such as initial VSI/port number
2293 * and switch ID it belongs to.
2295 * NOTE: *req_desc is both an input/output parameter.
2296 * The caller of this function first calls this function with *request_desc set
2297 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2298 * configuration information has been returned; if non-zero (meaning not all
2299 * the information was returned), the caller should call this function again
2300 * with *req_desc set to the previous value returned by f/w to get the
2301 * next block of switch configuration information.
2303 * *num_elems is output only parameter. This reflects the number of elements
2304 * in response buffer. The caller of this function to use *num_elems while
2305 * parsing the response buffer.
2307 static enum ice_status
2308 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2309 u16 buf_size, u16 *req_desc, u16 *num_elems,
2310 struct ice_sq_cd *cd)
2312 struct ice_aqc_get_sw_cfg *cmd;
2313 struct ice_aq_desc desc;
2314 enum ice_status status;
2316 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2317 cmd = &desc.params.get_sw_conf;
2318 cmd->element = CPU_TO_LE16(*req_desc);
2320 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2322 *req_desc = LE16_TO_CPU(cmd->element);
2323 *num_elems = LE16_TO_CPU(cmd->num_elems);
2330 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2331 * @hw: pointer to the HW struct
2332 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2333 * @global_lut_id: output parameter for the RSS global LUT's ID
2335 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2337 struct ice_aqc_alloc_free_res_elem *sw_buf;
2338 enum ice_status status;
2341 buf_len = ice_struct_size(sw_buf, elem, 1);
2342 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2344 return ICE_ERR_NO_MEMORY;
2346 sw_buf->num_elems = CPU_TO_LE16(1);
2347 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2348 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2349 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2351 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2353 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2354 shared_res ? "shared" : "dedicated", status);
2355 goto ice_alloc_global_lut_exit;
2358 *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2360 ice_alloc_global_lut_exit:
2361 ice_free(hw, sw_buf);
2366 * ice_free_rss_global_lut - free a RSS global LUT
2367 * @hw: pointer to the HW struct
2368 * @global_lut_id: ID of the RSS global LUT to free
2370 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2372 struct ice_aqc_alloc_free_res_elem *sw_buf;
2373 u16 buf_len, num_elems = 1;
2374 enum ice_status status;
2376 buf_len = ice_struct_size(sw_buf, elem, num_elems);
2377 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2379 return ICE_ERR_NO_MEMORY;
2381 sw_buf->num_elems = CPU_TO_LE16(num_elems);
2382 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2383 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2385 status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2387 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2388 global_lut_id, status);
2390 ice_free(hw, sw_buf);
2395 * ice_alloc_sw - allocate resources specific to switch
2396 * @hw: pointer to the HW struct
2397 * @ena_stats: true to turn on VEB stats
2398 * @shared_res: true for shared resource, false for dedicated resource
2399 * @sw_id: switch ID returned
2400 * @counter_id: VEB counter ID returned
2402 * allocates switch resources (SWID and VEB counter) (0x0208)
2405 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2408 struct ice_aqc_alloc_free_res_elem *sw_buf;
2409 struct ice_aqc_res_elem *sw_ele;
2410 enum ice_status status;
2413 buf_len = ice_struct_size(sw_buf, elem, 1);
2414 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2416 return ICE_ERR_NO_MEMORY;
2418 /* Prepare buffer for switch ID.
2419 * The number of resource entries in buffer is passed as 1 since only a
2420 * single switch/VEB instance is allocated, and hence a single sw_id
2423 sw_buf->num_elems = CPU_TO_LE16(1);
2425 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2426 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2427 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2429 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2430 ice_aqc_opc_alloc_res, NULL);
2433 goto ice_alloc_sw_exit;
2435 sw_ele = &sw_buf->elem[0];
2436 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2439 /* Prepare buffer for VEB Counter */
2440 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2441 struct ice_aqc_alloc_free_res_elem *counter_buf;
2442 struct ice_aqc_res_elem *counter_ele;
2444 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2445 ice_malloc(hw, buf_len);
2447 status = ICE_ERR_NO_MEMORY;
2448 goto ice_alloc_sw_exit;
2451 /* The number of resource entries in buffer is passed as 1 since
2452 * only a single switch/VEB instance is allocated, and hence a
2453 * single VEB counter is requested.
2455 counter_buf->num_elems = CPU_TO_LE16(1);
2456 counter_buf->res_type =
2457 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2458 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2459 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2463 ice_free(hw, counter_buf);
2464 goto ice_alloc_sw_exit;
2466 counter_ele = &counter_buf->elem[0];
2467 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2468 ice_free(hw, counter_buf);
2472 ice_free(hw, sw_buf);
2477 * ice_free_sw - free resources specific to switch
2478 * @hw: pointer to the HW struct
2479 * @sw_id: switch ID returned
2480 * @counter_id: VEB counter ID returned
2482 * free switch resources (SWID and VEB counter) (0x0209)
2484 * NOTE: This function frees multiple resources. It continues
2485 * releasing other resources even after it encounters error.
2486 * The error code returned is the last error it encountered.
2488 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2490 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2491 enum ice_status status, ret_status;
2494 buf_len = ice_struct_size(sw_buf, elem, 1);
2495 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2497 return ICE_ERR_NO_MEMORY;
2499 /* Prepare buffer to free for switch ID res.
2500 * The number of resource entries in buffer is passed as 1 since only a
2501 * single switch/VEB instance is freed, and hence a single sw_id
2504 sw_buf->num_elems = CPU_TO_LE16(1);
2505 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2506 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2508 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2509 ice_aqc_opc_free_res, NULL);
2512 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2514 /* Prepare buffer to free for VEB Counter resource */
2515 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2516 ice_malloc(hw, buf_len);
2518 ice_free(hw, sw_buf);
2519 return ICE_ERR_NO_MEMORY;
2522 /* The number of resource entries in buffer is passed as 1 since only a
2523 * single switch/VEB instance is freed, and hence a single VEB counter
2526 counter_buf->num_elems = CPU_TO_LE16(1);
2527 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2528 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2530 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2531 ice_aqc_opc_free_res, NULL);
2533 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2534 ret_status = status;
2537 ice_free(hw, counter_buf);
2538 ice_free(hw, sw_buf);
2544 * @hw: pointer to the HW struct
2545 * @vsi_ctx: pointer to a VSI context struct
2546 * @cd: pointer to command details structure or NULL
2548 * Add a VSI context to the hardware (0x0210)
2551 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2552 struct ice_sq_cd *cd)
2554 struct ice_aqc_add_update_free_vsi_resp *res;
2555 struct ice_aqc_add_get_update_free_vsi *cmd;
2556 struct ice_aq_desc desc;
2557 enum ice_status status;
2559 cmd = &desc.params.vsi_cmd;
2560 res = &desc.params.add_update_free_vsi_res;
2562 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2564 if (!vsi_ctx->alloc_from_pool)
2565 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2566 ICE_AQ_VSI_IS_VALID);
2568 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2570 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2572 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2573 sizeof(vsi_ctx->info), cd);
2576 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2577 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2578 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2586 * @hw: pointer to the HW struct
2587 * @vsi_ctx: pointer to a VSI context struct
2588 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2589 * @cd: pointer to command details structure or NULL
2591 * Free VSI context info from hardware (0x0213)
2594 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2595 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2597 struct ice_aqc_add_update_free_vsi_resp *resp;
2598 struct ice_aqc_add_get_update_free_vsi *cmd;
2599 struct ice_aq_desc desc;
2600 enum ice_status status;
2602 cmd = &desc.params.vsi_cmd;
2603 resp = &desc.params.add_update_free_vsi_res;
2605 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2607 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2609 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2611 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2613 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2614 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2622 * @hw: pointer to the HW struct
2623 * @vsi_ctx: pointer to a VSI context struct
2624 * @cd: pointer to command details structure or NULL
2626 * Update VSI context in the hardware (0x0211)
2629 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2630 struct ice_sq_cd *cd)
2632 struct ice_aqc_add_update_free_vsi_resp *resp;
2633 struct ice_aqc_add_get_update_free_vsi *cmd;
2634 struct ice_aq_desc desc;
2635 enum ice_status status;
2637 cmd = &desc.params.vsi_cmd;
2638 resp = &desc.params.add_update_free_vsi_res;
2640 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2642 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2644 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2646 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2647 sizeof(vsi_ctx->info), cd);
2650 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2651 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2658 * ice_is_vsi_valid - check whether the VSI is valid or not
2659 * @hw: pointer to the HW struct
2660 * @vsi_handle: VSI handle
2662 * check whether the VSI is valid or not
2664 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2666 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2670 * ice_get_hw_vsi_num - return the HW VSI number
2671 * @hw: pointer to the HW struct
2672 * @vsi_handle: VSI handle
2674 * return the HW VSI number
2675 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2677 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2679 return hw->vsi_ctx[vsi_handle]->vsi_num;
2683 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2684 * @hw: pointer to the HW struct
2685 * @vsi_handle: VSI handle
2687 * return the VSI context entry for a given VSI handle
2689 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2691 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2695 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2696 * @hw: pointer to the HW struct
2697 * @vsi_handle: VSI handle
2698 * @vsi: VSI context pointer
2700 * save the VSI context entry for a given VSI handle
2703 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2705 hw->vsi_ctx[vsi_handle] = vsi;
2709 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2710 * @hw: pointer to the HW struct
2711 * @vsi_handle: VSI handle
2713 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2715 struct ice_vsi_ctx *vsi;
2718 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2721 ice_for_each_traffic_class(i) {
2722 if (vsi->lan_q_ctx[i]) {
2723 ice_free(hw, vsi->lan_q_ctx[i]);
2724 vsi->lan_q_ctx[i] = NULL;
2730 * ice_clear_vsi_ctx - clear the VSI context entry
2731 * @hw: pointer to the HW struct
2732 * @vsi_handle: VSI handle
2734 * clear the VSI context entry
2736 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2738 struct ice_vsi_ctx *vsi;
2740 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2742 ice_clear_vsi_q_ctx(hw, vsi_handle);
2744 hw->vsi_ctx[vsi_handle] = NULL;
2749 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2750 * @hw: pointer to the HW struct
2752 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2756 for (i = 0; i < ICE_MAX_VSI; i++)
2757 ice_clear_vsi_ctx(hw, i);
2761 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2762 * @hw: pointer to the HW struct
2763 * @vsi_handle: unique VSI handle provided by drivers
2764 * @vsi_ctx: pointer to a VSI context struct
2765 * @cd: pointer to command details structure or NULL
2767 * Add a VSI context to the hardware also add it into the VSI handle list.
2768 * If this function gets called after reset for existing VSIs then update
2769 * with the new HW VSI number in the corresponding VSI handle list entry.
2772 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2773 struct ice_sq_cd *cd)
2775 struct ice_vsi_ctx *tmp_vsi_ctx;
2776 enum ice_status status;
2778 if (vsi_handle >= ICE_MAX_VSI)
2779 return ICE_ERR_PARAM;
2780 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2783 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2785 /* Create a new VSI context */
2786 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2787 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2789 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2790 return ICE_ERR_NO_MEMORY;
2792 *tmp_vsi_ctx = *vsi_ctx;
2794 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2796 /* update with new HW VSI num */
2797 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2804 * ice_free_vsi- free VSI context from hardware and VSI handle list
2805 * @hw: pointer to the HW struct
2806 * @vsi_handle: unique VSI handle
2807 * @vsi_ctx: pointer to a VSI context struct
2808 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2809 * @cd: pointer to command details structure or NULL
2811 * Free VSI context info from hardware as well as from VSI handle list
2814 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2815 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2817 enum ice_status status;
2819 if (!ice_is_vsi_valid(hw, vsi_handle))
2820 return ICE_ERR_PARAM;
2821 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2822 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2824 ice_clear_vsi_ctx(hw, vsi_handle);
2830 * @hw: pointer to the HW struct
2831 * @vsi_handle: unique VSI handle
2832 * @vsi_ctx: pointer to a VSI context struct
2833 * @cd: pointer to command details structure or NULL
2835 * Update VSI context in the hardware
2838 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2839 struct ice_sq_cd *cd)
2841 if (!ice_is_vsi_valid(hw, vsi_handle))
2842 return ICE_ERR_PARAM;
2843 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2844 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2848 * ice_aq_get_vsi_params
2849 * @hw: pointer to the HW struct
2850 * @vsi_ctx: pointer to a VSI context struct
2851 * @cd: pointer to command details structure or NULL
2853 * Get VSI context info from hardware (0x0212)
2856 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2857 struct ice_sq_cd *cd)
2859 struct ice_aqc_add_get_update_free_vsi *cmd;
2860 struct ice_aqc_get_vsi_resp *resp;
2861 struct ice_aq_desc desc;
2862 enum ice_status status;
2864 cmd = &desc.params.vsi_cmd;
2865 resp = &desc.params.get_vsi_resp;
2867 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2869 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2871 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2872 sizeof(vsi_ctx->info), cd);
2874 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2876 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2877 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2884 * ice_aq_add_update_mir_rule - add/update a mirror rule
2885 * @hw: pointer to the HW struct
2886 * @rule_type: Rule Type
2887 * @dest_vsi: VSI number to which packets will be mirrored
2888 * @count: length of the list
2889 * @mr_buf: buffer for list of mirrored VSI numbers
2890 * @cd: pointer to command details structure or NULL
2893 * Add/Update Mirror Rule (0x260).
2896 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2897 u16 count, struct ice_mir_rule_buf *mr_buf,
2898 struct ice_sq_cd *cd, u16 *rule_id)
2900 struct ice_aqc_add_update_mir_rule *cmd;
2901 struct ice_aq_desc desc;
2902 enum ice_status status;
2903 __le16 *mr_list = NULL;
2906 switch (rule_type) {
2907 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2908 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2909 /* Make sure count and mr_buf are set for these rule_types */
2910 if (!(count && mr_buf))
2911 return ICE_ERR_PARAM;
2913 buf_size = count * sizeof(__le16);
2914 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2916 return ICE_ERR_NO_MEMORY;
2918 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2919 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2920 /* Make sure count and mr_buf are not set for these
2923 if (count || mr_buf)
2924 return ICE_ERR_PARAM;
2927 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2928 return ICE_ERR_OUT_OF_RANGE;
2931 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2933 /* Pre-process 'mr_buf' items for add/update of virtual port
2934 * ingress/egress mirroring (but not physical port ingress/egress
2940 for (i = 0; i < count; i++) {
2943 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2945 /* Validate specified VSI number, make sure it is less
2946 * than ICE_MAX_VSI, if not return with error.
2948 if (id >= ICE_MAX_VSI) {
2949 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2951 ice_free(hw, mr_list);
2952 return ICE_ERR_OUT_OF_RANGE;
2955 /* add VSI to mirror rule */
2958 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2959 else /* remove VSI from mirror rule */
2960 mr_list[i] = CPU_TO_LE16(id);
2964 cmd = &desc.params.add_update_rule;
2965 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2966 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2967 ICE_AQC_RULE_ID_VALID_M);
2968 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2969 cmd->num_entries = CPU_TO_LE16(count);
2970 cmd->dest = CPU_TO_LE16(dest_vsi);
2972 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2974 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2976 ice_free(hw, mr_list);
2982 * ice_aq_delete_mir_rule - delete a mirror rule
2983 * @hw: pointer to the HW struct
2984 * @rule_id: Mirror rule ID (to be deleted)
2985 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2986 * otherwise it is returned to the shared pool
2987 * @cd: pointer to command details structure or NULL
2989 * Delete Mirror Rule (0x261).
2992 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2993 struct ice_sq_cd *cd)
2995 struct ice_aqc_delete_mir_rule *cmd;
2996 struct ice_aq_desc desc;
2998 /* rule_id should be in the range 0...63 */
2999 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3000 return ICE_ERR_OUT_OF_RANGE;
3002 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3004 cmd = &desc.params.del_rule;
3005 rule_id |= ICE_AQC_RULE_ID_VALID_M;
3006 cmd->rule_id = CPU_TO_LE16(rule_id);
3009 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3011 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3015 * ice_aq_alloc_free_vsi_list
3016 * @hw: pointer to the HW struct
3017 * @vsi_list_id: VSI list ID returned or used for lookup
3018 * @lkup_type: switch rule filter lookup type
3019 * @opc: switch rules population command type - pass in the command opcode
3021 * allocates or free a VSI list resource
3023 static enum ice_status
3024 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3025 enum ice_sw_lkup_type lkup_type,
3026 enum ice_adminq_opc opc)
3028 struct ice_aqc_alloc_free_res_elem *sw_buf;
3029 struct ice_aqc_res_elem *vsi_ele;
3030 enum ice_status status;
3033 buf_len = ice_struct_size(sw_buf, elem, 1);
3034 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3036 return ICE_ERR_NO_MEMORY;
3037 sw_buf->num_elems = CPU_TO_LE16(1);
3039 if (lkup_type == ICE_SW_LKUP_MAC ||
3040 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3041 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3042 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3043 lkup_type == ICE_SW_LKUP_PROMISC ||
3044 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3045 lkup_type == ICE_SW_LKUP_LAST) {
3046 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3047 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3049 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3051 status = ICE_ERR_PARAM;
3052 goto ice_aq_alloc_free_vsi_list_exit;
3055 if (opc == ice_aqc_opc_free_res)
3056 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3058 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3060 goto ice_aq_alloc_free_vsi_list_exit;
3062 if (opc == ice_aqc_opc_alloc_res) {
3063 vsi_ele = &sw_buf->elem[0];
3064 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3067 ice_aq_alloc_free_vsi_list_exit:
3068 ice_free(hw, sw_buf);
3073 * ice_aq_set_storm_ctrl - Sets storm control configuration
3074 * @hw: pointer to the HW struct
3075 * @bcast_thresh: represents the upper threshold for broadcast storm control
3076 * @mcast_thresh: represents the upper threshold for multicast storm control
3077 * @ctl_bitmask: storm control knobs
3079 * Sets the storm control configuration (0x0280)
3082 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3085 struct ice_aqc_storm_cfg *cmd;
3086 struct ice_aq_desc desc;
3088 cmd = &desc.params.storm_conf;
3090 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3092 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3093 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3094 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3096 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3100 * ice_aq_get_storm_ctrl - gets storm control configuration
3101 * @hw: pointer to the HW struct
3102 * @bcast_thresh: represents the upper threshold for broadcast storm control
3103 * @mcast_thresh: represents the upper threshold for multicast storm control
3104 * @ctl_bitmask: storm control knobs
3106 * Gets the storm control configuration (0x0281)
3109 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3112 enum ice_status status;
3113 struct ice_aq_desc desc;
3115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3117 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3119 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3122 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3125 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3128 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3135 * ice_aq_sw_rules - add/update/remove switch rules
3136 * @hw: pointer to the HW struct
3137 * @rule_list: pointer to switch rule population list
3138 * @rule_list_sz: total size of the rule list in bytes
3139 * @num_rules: number of switch rules in the rule_list
3140 * @opc: switch rules population command type - pass in the command opcode
3141 * @cd: pointer to command details structure or NULL
3143 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3145 static enum ice_status
3146 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3147 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3149 struct ice_aq_desc desc;
3150 enum ice_status status;
3152 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3154 if (opc != ice_aqc_opc_add_sw_rules &&
3155 opc != ice_aqc_opc_update_sw_rules &&
3156 opc != ice_aqc_opc_remove_sw_rules)
3157 return ICE_ERR_PARAM;
3159 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3161 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3162 desc.params.sw_rules.num_rules_fltr_entry_index =
3163 CPU_TO_LE16(num_rules);
3164 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3165 if (opc != ice_aqc_opc_add_sw_rules &&
3166 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3167 status = ICE_ERR_DOES_NOT_EXIST;
3173 * ice_aq_add_recipe - add switch recipe
3174 * @hw: pointer to the HW struct
3175 * @s_recipe_list: pointer to switch rule population list
3176 * @num_recipes: number of switch recipes in the list
3177 * @cd: pointer to command details structure or NULL
3182 ice_aq_add_recipe(struct ice_hw *hw,
3183 struct ice_aqc_recipe_data_elem *s_recipe_list,
3184 u16 num_recipes, struct ice_sq_cd *cd)
3186 struct ice_aqc_add_get_recipe *cmd;
3187 struct ice_aq_desc desc;
3190 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3191 cmd = &desc.params.add_get_recipe;
3192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3194 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3195 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3197 buf_size = num_recipes * sizeof(*s_recipe_list);
3199 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3203 * ice_aq_get_recipe - get switch recipe
3204 * @hw: pointer to the HW struct
3205 * @s_recipe_list: pointer to switch rule population list
3206 * @num_recipes: pointer to the number of recipes (input and output)
3207 * @recipe_root: root recipe number of recipe(s) to retrieve
3208 * @cd: pointer to command details structure or NULL
3212 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3213 * On output, *num_recipes will equal the number of entries returned in
3216 * The caller must supply enough space in s_recipe_list to hold all possible
3217 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3220 ice_aq_get_recipe(struct ice_hw *hw,
3221 struct ice_aqc_recipe_data_elem *s_recipe_list,
3222 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3224 struct ice_aqc_add_get_recipe *cmd;
3225 struct ice_aq_desc desc;
3226 enum ice_status status;
3229 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3230 return ICE_ERR_PARAM;
3232 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3233 cmd = &desc.params.add_get_recipe;
3234 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3236 cmd->return_index = CPU_TO_LE16(recipe_root);
3237 cmd->num_sub_recipes = 0;
3239 buf_size = *num_recipes * sizeof(*s_recipe_list);
3241 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3242 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3248 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3249 * @hw: pointer to the HW struct
3250 * @params: parameters used to update the default recipe
3252 * This function only supports updating default recipes and it only supports
3253 * updating a single recipe based on the lkup_idx at a time.
3255 * This is done as a read-modify-write operation. First, get the current recipe
3256 * contents based on the recipe's ID. Then modify the field vector index and
3257 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3258 * the pre-existing recipe with the modifications.
3261 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3262 struct ice_update_recipe_lkup_idx_params *params)
3264 struct ice_aqc_recipe_data_elem *rcp_list;
3265 u16 num_recps = ICE_MAX_NUM_RECIPES;
3266 enum ice_status status;
3268 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3270 return ICE_ERR_NO_MEMORY;
3272 /* read current recipe list from firmware */
3273 rcp_list->recipe_indx = params->rid;
3274 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3276 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3277 params->rid, status);
3281 /* only modify existing recipe's lkup_idx and mask if valid, while
3282 * leaving all other fields the same, then update the recipe firmware
3284 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3285 if (params->mask_valid)
3286 rcp_list->content.mask[params->lkup_idx] =
3287 CPU_TO_LE16(params->mask);
3289 if (params->ignore_valid)
3290 rcp_list->content.lkup_indx[params->lkup_idx] |=
3291 ICE_AQ_RECIPE_LKUP_IGNORE;
3293 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3295 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3296 params->rid, params->lkup_idx, params->fv_idx,
3297 params->mask, params->mask_valid ? "true" : "false",
3301 ice_free(hw, rcp_list);
3306 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3307 * @hw: pointer to the HW struct
3308 * @profile_id: package profile ID to associate the recipe with
3309 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3310 * @cd: pointer to command details structure or NULL
3311 * Recipe to profile association (0x0291)
3314 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3315 struct ice_sq_cd *cd)
3317 struct ice_aqc_recipe_to_profile *cmd;
3318 struct ice_aq_desc desc;
3320 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3321 cmd = &desc.params.recipe_to_profile;
3322 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3323 cmd->profile_id = CPU_TO_LE16(profile_id);
3324 /* Set the recipe ID bit in the bitmask to let the device know which
3325 * profile we are associating the recipe to
3327 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3328 ICE_NONDMA_TO_NONDMA);
3330 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3334 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3335 * @hw: pointer to the HW struct
3336 * @profile_id: package profile ID to associate the recipe with
3337 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3338 * @cd: pointer to command details structure or NULL
3339 * Associate profile ID with given recipe (0x0293)
3342 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3343 struct ice_sq_cd *cd)
3345 struct ice_aqc_recipe_to_profile *cmd;
3346 struct ice_aq_desc desc;
3347 enum ice_status status;
3349 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3350 cmd = &desc.params.recipe_to_profile;
3351 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3352 cmd->profile_id = CPU_TO_LE16(profile_id);
3354 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3356 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3357 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3363 * ice_alloc_recipe - add recipe resource
3364 * @hw: pointer to the hardware structure
3365 * @rid: recipe ID returned as response to AQ call
3367 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3369 struct ice_aqc_alloc_free_res_elem *sw_buf;
3370 enum ice_status status;
3373 buf_len = ice_struct_size(sw_buf, elem, 1);
3374 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3376 return ICE_ERR_NO_MEMORY;
3378 sw_buf->num_elems = CPU_TO_LE16(1);
3379 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3380 ICE_AQC_RES_TYPE_S) |
3381 ICE_AQC_RES_TYPE_FLAG_SHARED);
3382 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3383 ice_aqc_opc_alloc_res, NULL);
3385 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3386 ice_free(hw, sw_buf);
3391 /* ice_init_port_info - Initialize port_info with switch configuration data
3392 * @pi: pointer to port_info
3393 * @vsi_port_num: VSI number or port number
3394 * @type: Type of switch element (port or VSI)
3395 * @swid: switch ID of the switch the element is attached to
3396 * @pf_vf_num: PF or VF number
3397 * @is_vf: true if the element is a VF, false otherwise
3400 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3401 u16 swid, u16 pf_vf_num, bool is_vf)
3404 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3405 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3407 pi->pf_vf_num = pf_vf_num;
3409 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3410 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3413 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3418 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3419 * @hw: pointer to the hardware structure
3421 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3423 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3424 enum ice_status status;
3431 num_total_ports = 1;
3433 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3434 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3437 return ICE_ERR_NO_MEMORY;
3439 /* Multiple calls to ice_aq_get_sw_cfg may be required
3440 * to get all the switch configuration information. The need
3441 * for additional calls is indicated by ice_aq_get_sw_cfg
3442 * writing a non-zero value in req_desc
3445 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3447 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3448 &req_desc, &num_elems, NULL);
3453 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3454 u16 pf_vf_num, swid, vsi_port_num;
3458 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3459 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3461 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3462 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3464 swid = LE16_TO_CPU(ele->swid);
3466 if (LE16_TO_CPU(ele->pf_vf_num) &
3467 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3470 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3471 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3474 case ICE_AQC_GET_SW_CONF_RESP_VSI:
3475 if (hw->dcf_enabled && !is_vf)
3476 hw->pf_id = pf_vf_num;
3478 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3479 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3480 if (j == num_total_ports) {
3481 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3482 status = ICE_ERR_CFG;
3485 ice_init_port_info(hw->port_info,
3486 vsi_port_num, res_type, swid,
3494 } while (req_desc && !status);
3502 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3503 * @hw: pointer to the hardware structure
3504 * @fi: filter info structure to fill/update
3506 * This helper function populates the lb_en and lan_en elements of the provided
3507 * ice_fltr_info struct using the switch's type and characteristics of the
3508 * switch rule being configured.
3510 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3512 if ((fi->flag & ICE_FLTR_RX) &&
3513 (fi->fltr_act == ICE_FWD_TO_VSI ||
3514 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3515 fi->lkup_type == ICE_SW_LKUP_LAST)
3519 if ((fi->flag & ICE_FLTR_TX) &&
3520 (fi->fltr_act == ICE_FWD_TO_VSI ||
3521 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3522 fi->fltr_act == ICE_FWD_TO_Q ||
3523 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3524 /* Setting LB for prune actions will result in replicated
3525 * packets to the internal switch that will be dropped.
3527 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3530 /* Set lan_en to TRUE if
3531 * 1. The switch is a VEB AND
3533 * 2.1 The lookup is a directional lookup like ethertype,
3534 * promiscuous, ethertype-MAC, promiscuous-VLAN
3535 * and default-port OR
3536 * 2.2 The lookup is VLAN, OR
3537 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3538 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3542 * The switch is a VEPA.
3544 * In all other cases, the LAN enable has to be set to false.
3547 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3548 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3549 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3550 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3551 fi->lkup_type == ICE_SW_LKUP_DFLT ||
3552 fi->lkup_type == ICE_SW_LKUP_VLAN ||
3553 (fi->lkup_type == ICE_SW_LKUP_MAC &&
3554 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3555 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3556 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3565 * ice_fill_sw_rule - Helper function to fill switch rule structure
3566 * @hw: pointer to the hardware structure
3567 * @f_info: entry containing packet forwarding information
3568 * @s_rule: switch rule structure to be filled in based on mac_entry
3569 * @opc: switch rules population command type - pass in the command opcode
3572 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3573 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3575 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3576 u16 vlan_tpid = ICE_ETH_P_8021Q;
3584 if (opc == ice_aqc_opc_remove_sw_rules) {
3585 s_rule->pdata.lkup_tx_rx.act = 0;
3586 s_rule->pdata.lkup_tx_rx.index =
3587 CPU_TO_LE16(f_info->fltr_rule_id);
3588 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3592 eth_hdr_sz = sizeof(dummy_eth_header);
3593 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3595 /* initialize the ether header with a dummy header */
3596 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3597 ice_fill_sw_info(hw, f_info);
3599 switch (f_info->fltr_act) {
3600 case ICE_FWD_TO_VSI:
3601 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3602 ICE_SINGLE_ACT_VSI_ID_M;
3603 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3604 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3605 ICE_SINGLE_ACT_VALID_BIT;
3607 case ICE_FWD_TO_VSI_LIST:
3608 act |= ICE_SINGLE_ACT_VSI_LIST;
3609 act |= (f_info->fwd_id.vsi_list_id <<
3610 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3611 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3612 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3613 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3614 ICE_SINGLE_ACT_VALID_BIT;
3617 act |= ICE_SINGLE_ACT_TO_Q;
3618 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3619 ICE_SINGLE_ACT_Q_INDEX_M;
3621 case ICE_DROP_PACKET:
3622 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3623 ICE_SINGLE_ACT_VALID_BIT;
3625 case ICE_FWD_TO_QGRP:
3626 q_rgn = f_info->qgrp_size > 0 ?
3627 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3628 act |= ICE_SINGLE_ACT_TO_Q;
3629 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3630 ICE_SINGLE_ACT_Q_INDEX_M;
3631 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3632 ICE_SINGLE_ACT_Q_REGION_M;
3639 act |= ICE_SINGLE_ACT_LB_ENABLE;
3641 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3643 switch (f_info->lkup_type) {
3644 case ICE_SW_LKUP_MAC:
3645 daddr = f_info->l_data.mac.mac_addr;
3647 case ICE_SW_LKUP_VLAN:
3648 vlan_id = f_info->l_data.vlan.vlan_id;
3649 if (f_info->l_data.vlan.tpid_valid)
3650 vlan_tpid = f_info->l_data.vlan.tpid;
3651 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3652 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3653 act |= ICE_SINGLE_ACT_PRUNE;
3654 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3657 case ICE_SW_LKUP_ETHERTYPE_MAC:
3658 daddr = f_info->l_data.ethertype_mac.mac_addr;
3660 case ICE_SW_LKUP_ETHERTYPE:
3661 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3662 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3664 case ICE_SW_LKUP_MAC_VLAN:
3665 daddr = f_info->l_data.mac_vlan.mac_addr;
3666 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3668 case ICE_SW_LKUP_PROMISC_VLAN:
3669 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3671 case ICE_SW_LKUP_PROMISC:
3672 daddr = f_info->l_data.mac_vlan.mac_addr;
3678 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3679 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3680 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3682 /* Recipe set depending on lookup type */
3683 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3684 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3685 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3688 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3689 ICE_NONDMA_TO_NONDMA);
3691 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3692 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3693 *off = CPU_TO_BE16(vlan_id);
3694 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3695 *off = CPU_TO_BE16(vlan_tpid);
3698 /* Create the switch rule with the final dummy Ethernet header */
3699 if (opc != ice_aqc_opc_update_sw_rules)
3700 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3704 * ice_add_marker_act
3705 * @hw: pointer to the hardware structure
3706 * @m_ent: the management entry for which sw marker needs to be added
3707 * @sw_marker: sw marker to tag the Rx descriptor with
3708 * @l_id: large action resource ID
3710 * Create a large action to hold software marker and update the switch rule
3711 * entry pointed by m_ent with newly created large action
3713 static enum ice_status
3714 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3715 u16 sw_marker, u16 l_id)
3717 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3718 /* For software marker we need 3 large actions
3719 * 1. FWD action: FWD TO VSI or VSI LIST
3720 * 2. GENERIC VALUE action to hold the profile ID
3721 * 3. GENERIC VALUE action to hold the software marker ID
3723 const u16 num_lg_acts = 3;
3724 enum ice_status status;
3730 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3731 return ICE_ERR_PARAM;
3733 /* Create two back-to-back switch rules and submit them to the HW using
3734 * one memory buffer:
3738 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3739 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3740 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3742 return ICE_ERR_NO_MEMORY;
3744 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3746 /* Fill in the first switch rule i.e. large action */
3747 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3748 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3749 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3751 /* First action VSI forwarding or VSI list forwarding depending on how
3754 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3755 m_ent->fltr_info.fwd_id.hw_vsi_id;
3757 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3758 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3759 if (m_ent->vsi_count > 1)
3760 act |= ICE_LG_ACT_VSI_LIST;
3761 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3763 /* Second action descriptor type */
3764 act = ICE_LG_ACT_GENERIC;
3766 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3767 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3769 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3770 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3772 /* Third action Marker value */
3773 act |= ICE_LG_ACT_GENERIC;
3774 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3775 ICE_LG_ACT_GENERIC_VALUE_M;
3777 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3779 /* call the fill switch rule to fill the lookup Tx Rx structure */
3780 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3781 ice_aqc_opc_update_sw_rules);
3783 /* Update the action to point to the large action ID */
3784 rx_tx->pdata.lkup_tx_rx.act =
3785 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3786 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3787 ICE_SINGLE_ACT_PTR_VAL_M));
3789 /* Use the filter rule ID of the previously created rule with single
3790 * act. Once the update happens, hardware will treat this as large
3793 rx_tx->pdata.lkup_tx_rx.index =
3794 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3796 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3797 ice_aqc_opc_update_sw_rules, NULL);
3799 m_ent->lg_act_idx = l_id;
3800 m_ent->sw_marker_id = sw_marker;
3803 ice_free(hw, lg_act);
3808 * ice_add_counter_act - add/update filter rule with counter action
3809 * @hw: pointer to the hardware structure
3810 * @m_ent: the management entry for which counter needs to be added
3811 * @counter_id: VLAN counter ID returned as part of allocate resource
3812 * @l_id: large action resource ID
3814 static enum ice_status
3815 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3816 u16 counter_id, u16 l_id)
3818 struct ice_aqc_sw_rules_elem *lg_act;
3819 struct ice_aqc_sw_rules_elem *rx_tx;
3820 enum ice_status status;
3821 /* 2 actions will be added while adding a large action counter */
3822 const int num_acts = 2;
3829 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3830 return ICE_ERR_PARAM;
3832 /* Create two back-to-back switch rules and submit them to the HW using
3833 * one memory buffer:
3837 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3838 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3839 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3841 return ICE_ERR_NO_MEMORY;
3843 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3845 /* Fill in the first switch rule i.e. large action */
3846 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3847 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3848 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3850 /* First action VSI forwarding or VSI list forwarding depending on how
3853 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3854 m_ent->fltr_info.fwd_id.hw_vsi_id;
3856 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3857 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3858 ICE_LG_ACT_VSI_LIST_ID_M;
3859 if (m_ent->vsi_count > 1)
3860 act |= ICE_LG_ACT_VSI_LIST;
3861 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3863 /* Second action counter ID */
3864 act = ICE_LG_ACT_STAT_COUNT;
3865 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3866 ICE_LG_ACT_STAT_COUNT_M;
3867 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3869 /* call the fill switch rule to fill the lookup Tx Rx structure */
3870 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3871 ice_aqc_opc_update_sw_rules);
3873 act = ICE_SINGLE_ACT_PTR;
3874 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3875 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3877 /* Use the filter rule ID of the previously created rule with single
3878 * act. Once the update happens, hardware will treat this as large
3881 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3882 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3884 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3885 ice_aqc_opc_update_sw_rules, NULL);
3887 m_ent->lg_act_idx = l_id;
3888 m_ent->counter_index = counter_id;
3891 ice_free(hw, lg_act);
3896 * ice_create_vsi_list_map
3897 * @hw: pointer to the hardware structure
3898 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3899 * @num_vsi: number of VSI handles in the array
3900 * @vsi_list_id: VSI list ID generated as part of allocate resource
3902 * Helper function to create a new entry of VSI list ID to VSI mapping
3903 * using the given VSI list ID
3905 static struct ice_vsi_list_map_info *
3906 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3909 struct ice_switch_info *sw = hw->switch_info;
3910 struct ice_vsi_list_map_info *v_map;
3913 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3917 v_map->vsi_list_id = vsi_list_id;
3919 for (i = 0; i < num_vsi; i++)
3920 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3922 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3927 * ice_update_vsi_list_rule
3928 * @hw: pointer to the hardware structure
3929 * @vsi_handle_arr: array of VSI handles to form a VSI list
3930 * @num_vsi: number of VSI handles in the array
3931 * @vsi_list_id: VSI list ID generated as part of allocate resource
3932 * @remove: Boolean value to indicate if this is a remove action
3933 * @opc: switch rules population command type - pass in the command opcode
3934 * @lkup_type: lookup type of the filter
3936 * Call AQ command to add a new switch rule or update existing switch rule
3937 * using the given VSI list ID
3939 static enum ice_status
3940 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3941 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3942 enum ice_sw_lkup_type lkup_type)
3944 struct ice_aqc_sw_rules_elem *s_rule;
3945 enum ice_status status;
3951 return ICE_ERR_PARAM;
3953 if (lkup_type == ICE_SW_LKUP_MAC ||
3954 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3955 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3956 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3957 lkup_type == ICE_SW_LKUP_PROMISC ||
3958 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3959 lkup_type == ICE_SW_LKUP_LAST)
3960 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3961 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3962 else if (lkup_type == ICE_SW_LKUP_VLAN)
3963 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3964 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3966 return ICE_ERR_PARAM;
3968 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3969 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3971 return ICE_ERR_NO_MEMORY;
3972 for (i = 0; i < num_vsi; i++) {
3973 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3974 status = ICE_ERR_PARAM;
3977 /* AQ call requires hw_vsi_id(s) */
3978 s_rule->pdata.vsi_list.vsi[i] =
3979 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3982 s_rule->type = CPU_TO_LE16(rule_type);
3983 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3984 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3986 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3989 ice_free(hw, s_rule);
3994 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3995 * @hw: pointer to the HW struct
3996 * @vsi_handle_arr: array of VSI handles to form a VSI list
3997 * @num_vsi: number of VSI handles in the array
3998 * @vsi_list_id: stores the ID of the VSI list to be created
3999 * @lkup_type: switch rule filter's lookup type
4001 static enum ice_status
4002 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4003 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4005 enum ice_status status;
4007 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4008 ice_aqc_opc_alloc_res);
4012 /* Update the newly created VSI list to include the specified VSIs */
4013 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4014 *vsi_list_id, false,
4015 ice_aqc_opc_add_sw_rules, lkup_type);
4019 * ice_create_pkt_fwd_rule
4020 * @hw: pointer to the hardware structure
4021 * @recp_list: corresponding filter management list
4022 * @f_entry: entry containing packet forwarding information
4024 * Create switch rule with given filter information and add an entry
4025 * to the corresponding filter management list to track this switch rule
4028 static enum ice_status
4029 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4030 struct ice_fltr_list_entry *f_entry)
4032 struct ice_fltr_mgmt_list_entry *fm_entry;
4033 struct ice_aqc_sw_rules_elem *s_rule;
4034 enum ice_status status;
4036 s_rule = (struct ice_aqc_sw_rules_elem *)
4037 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4039 return ICE_ERR_NO_MEMORY;
4040 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4041 ice_malloc(hw, sizeof(*fm_entry));
4043 status = ICE_ERR_NO_MEMORY;
4044 goto ice_create_pkt_fwd_rule_exit;
4047 fm_entry->fltr_info = f_entry->fltr_info;
4049 /* Initialize all the fields for the management entry */
4050 fm_entry->vsi_count = 1;
4051 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4052 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4053 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4055 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4056 ice_aqc_opc_add_sw_rules);
4058 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4059 ice_aqc_opc_add_sw_rules, NULL);
4061 ice_free(hw, fm_entry);
4062 goto ice_create_pkt_fwd_rule_exit;
4065 f_entry->fltr_info.fltr_rule_id =
4066 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4067 fm_entry->fltr_info.fltr_rule_id =
4068 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4070 /* The book keeping entries will get removed when base driver
4071 * calls remove filter AQ command
4073 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4075 ice_create_pkt_fwd_rule_exit:
4076 ice_free(hw, s_rule);
4081 * ice_update_pkt_fwd_rule
4082 * @hw: pointer to the hardware structure
4083 * @f_info: filter information for switch rule
4085 * Call AQ command to update a previously created switch rule with a
4088 static enum ice_status
4089 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4091 struct ice_aqc_sw_rules_elem *s_rule;
4092 enum ice_status status;
4094 s_rule = (struct ice_aqc_sw_rules_elem *)
4095 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4097 return ICE_ERR_NO_MEMORY;
4099 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4101 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4103 /* Update switch rule with new rule set to forward VSI list */
4104 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4105 ice_aqc_opc_update_sw_rules, NULL);
4107 ice_free(hw, s_rule);
4112 * ice_update_sw_rule_bridge_mode
4113 * @hw: pointer to the HW struct
4115 * Updates unicast switch filter rules based on VEB/VEPA mode
4117 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4119 struct ice_switch_info *sw = hw->switch_info;
4120 struct ice_fltr_mgmt_list_entry *fm_entry;
4121 enum ice_status status = ICE_SUCCESS;
4122 struct LIST_HEAD_TYPE *rule_head;
4123 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4125 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4126 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4128 ice_acquire_lock(rule_lock);
4129 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4131 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4132 u8 *addr = fi->l_data.mac.mac_addr;
4134 /* Update unicast Tx rules to reflect the selected
4137 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4138 (fi->fltr_act == ICE_FWD_TO_VSI ||
4139 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4140 fi->fltr_act == ICE_FWD_TO_Q ||
4141 fi->fltr_act == ICE_FWD_TO_QGRP)) {
4142 status = ice_update_pkt_fwd_rule(hw, fi);
4148 ice_release_lock(rule_lock);
4154 * ice_add_update_vsi_list
4155 * @hw: pointer to the hardware structure
4156 * @m_entry: pointer to current filter management list entry
4157 * @cur_fltr: filter information from the book keeping entry
4158 * @new_fltr: filter information with the new VSI to be added
4160 * Call AQ command to add or update previously created VSI list with new VSI.
4162 * Helper function to do book keeping associated with adding filter information
4163 * The algorithm to do the book keeping is described below :
4164 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4165 * if only one VSI has been added till now
4166 * Allocate a new VSI list and add two VSIs
4167 * to this list using switch rule command
4168 * Update the previously created switch rule with the
4169 * newly created VSI list ID
4170 * if a VSI list was previously created
4171 * Add the new VSI to the previously created VSI list set
4172 * using the update switch rule command
4174 static enum ice_status
4175 ice_add_update_vsi_list(struct ice_hw *hw,
4176 struct ice_fltr_mgmt_list_entry *m_entry,
4177 struct ice_fltr_info *cur_fltr,
4178 struct ice_fltr_info *new_fltr)
4180 enum ice_status status = ICE_SUCCESS;
4181 u16 vsi_list_id = 0;
4183 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4184 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4185 return ICE_ERR_NOT_IMPL;
4187 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4188 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4189 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4190 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4191 return ICE_ERR_NOT_IMPL;
4193 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4194 /* Only one entry existed in the mapping and it was not already
4195 * a part of a VSI list. So, create a VSI list with the old and
4198 struct ice_fltr_info tmp_fltr;
4199 u16 vsi_handle_arr[2];
4201 /* A rule already exists with the new VSI being added */
4202 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4203 return ICE_ERR_ALREADY_EXISTS;
4205 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4206 vsi_handle_arr[1] = new_fltr->vsi_handle;
4207 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4209 new_fltr->lkup_type);
4213 tmp_fltr = *new_fltr;
4214 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4215 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4216 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4217 /* Update the previous switch rule of "MAC forward to VSI" to
4218 * "MAC fwd to VSI list"
4220 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4224 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4225 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4226 m_entry->vsi_list_info =
4227 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4230 if (!m_entry->vsi_list_info)
4231 return ICE_ERR_NO_MEMORY;
4233 /* If this entry was large action then the large action needs
4234 * to be updated to point to FWD to VSI list
4236 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4238 ice_add_marker_act(hw, m_entry,
4239 m_entry->sw_marker_id,
4240 m_entry->lg_act_idx);
4242 u16 vsi_handle = new_fltr->vsi_handle;
4243 enum ice_adminq_opc opcode;
4245 if (!m_entry->vsi_list_info)
4248 /* A rule already exists with the new VSI being added */
4249 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4252 /* Update the previously created VSI list set with
4253 * the new VSI ID passed in
4255 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4256 opcode = ice_aqc_opc_update_sw_rules;
4258 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4259 vsi_list_id, false, opcode,
4260 new_fltr->lkup_type);
4261 /* update VSI list mapping info with new VSI ID */
4263 ice_set_bit(vsi_handle,
4264 m_entry->vsi_list_info->vsi_map);
4267 m_entry->vsi_count++;
4272 * ice_find_rule_entry - Search a rule entry
4273 * @list_head: head of rule list
4274 * @f_info: rule information
4276 * Helper function to search for a given rule entry
4277 * Returns pointer to entry storing the rule if found
4279 static struct ice_fltr_mgmt_list_entry *
4280 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4281 struct ice_fltr_info *f_info)
4283 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4285 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4287 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4288 sizeof(f_info->l_data)) &&
4289 f_info->flag == list_itr->fltr_info.flag) {
4298 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4299 * @recp_list: VSI lists needs to be searched
4300 * @vsi_handle: VSI handle to be found in VSI list
4301 * @vsi_list_id: VSI list ID found containing vsi_handle
4303 * Helper function to search a VSI list with single entry containing given VSI
4304 * handle element. This can be extended further to search VSI list with more
4305 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4307 static struct ice_vsi_list_map_info *
4308 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4311 struct ice_vsi_list_map_info *map_info = NULL;
4312 struct LIST_HEAD_TYPE *list_head;
4314 list_head = &recp_list->filt_rules;
4315 if (recp_list->adv_rule) {
4316 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4318 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4319 ice_adv_fltr_mgmt_list_entry,
4321 if (list_itr->vsi_list_info) {
4322 map_info = list_itr->vsi_list_info;
4323 if (ice_is_bit_set(map_info->vsi_map,
4325 *vsi_list_id = map_info->vsi_list_id;
4331 struct ice_fltr_mgmt_list_entry *list_itr;
4333 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4334 ice_fltr_mgmt_list_entry,
4336 if (list_itr->vsi_count == 1 &&
4337 list_itr->vsi_list_info) {
4338 map_info = list_itr->vsi_list_info;
4339 if (ice_is_bit_set(map_info->vsi_map,
4341 *vsi_list_id = map_info->vsi_list_id;
4351 * ice_add_rule_internal - add rule for a given lookup type
4352 * @hw: pointer to the hardware structure
4353 * @recp_list: recipe list for which rule has to be added
4354 * @lport: logic port number on which function add rule
4355 * @f_entry: structure containing MAC forwarding information
4357 * Adds or updates the rule lists for a given recipe
4359 static enum ice_status
4360 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4361 u8 lport, struct ice_fltr_list_entry *f_entry)
4363 struct ice_fltr_info *new_fltr, *cur_fltr;
4364 struct ice_fltr_mgmt_list_entry *m_entry;
4365 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4366 enum ice_status status = ICE_SUCCESS;
4368 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4369 return ICE_ERR_PARAM;
4371 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4372 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4373 f_entry->fltr_info.fwd_id.hw_vsi_id =
4374 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4376 rule_lock = &recp_list->filt_rule_lock;
4378 ice_acquire_lock(rule_lock);
4379 new_fltr = &f_entry->fltr_info;
4380 if (new_fltr->flag & ICE_FLTR_RX)
4381 new_fltr->src = lport;
4382 else if (new_fltr->flag & ICE_FLTR_TX)
4384 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4386 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4388 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4389 goto exit_add_rule_internal;
4392 cur_fltr = &m_entry->fltr_info;
4393 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4395 exit_add_rule_internal:
4396 ice_release_lock(rule_lock);
4401 * ice_remove_vsi_list_rule
4402 * @hw: pointer to the hardware structure
4403 * @vsi_list_id: VSI list ID generated as part of allocate resource
4404 * @lkup_type: switch rule filter lookup type
4406 * The VSI list should be emptied before this function is called to remove the
4409 static enum ice_status
4410 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4411 enum ice_sw_lkup_type lkup_type)
4413 /* Free the vsi_list resource that we allocated. It is assumed that the
4414 * list is empty at this point.
4416 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4417 ice_aqc_opc_free_res);
4421 * ice_rem_update_vsi_list
4422 * @hw: pointer to the hardware structure
4423 * @vsi_handle: VSI handle of the VSI to remove
4424 * @fm_list: filter management entry for which the VSI list management needs to
4427 static enum ice_status
4428 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4429 struct ice_fltr_mgmt_list_entry *fm_list)
4431 enum ice_sw_lkup_type lkup_type;
4432 enum ice_status status = ICE_SUCCESS;
4435 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4436 fm_list->vsi_count == 0)
4437 return ICE_ERR_PARAM;
4439 /* A rule with the VSI being removed does not exist */
4440 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4441 return ICE_ERR_DOES_NOT_EXIST;
4443 lkup_type = fm_list->fltr_info.lkup_type;
4444 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4445 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4446 ice_aqc_opc_update_sw_rules,
4451 fm_list->vsi_count--;
4452 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4454 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4455 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4456 struct ice_vsi_list_map_info *vsi_list_info =
4457 fm_list->vsi_list_info;
4460 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4462 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4463 return ICE_ERR_OUT_OF_RANGE;
4465 /* Make sure VSI list is empty before removing it below */
4466 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4468 ice_aqc_opc_update_sw_rules,
4473 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4474 tmp_fltr_info.fwd_id.hw_vsi_id =
4475 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4476 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4477 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4479 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4480 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4484 fm_list->fltr_info = tmp_fltr_info;
4487 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4488 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4489 struct ice_vsi_list_map_info *vsi_list_info =
4490 fm_list->vsi_list_info;
4492 /* Remove the VSI list since it is no longer used */
4493 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4495 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4496 vsi_list_id, status);
4500 LIST_DEL(&vsi_list_info->list_entry);
4501 ice_free(hw, vsi_list_info);
4502 fm_list->vsi_list_info = NULL;
4509 * ice_remove_rule_internal - Remove a filter rule of a given type
4511 * @hw: pointer to the hardware structure
4512 * @recp_list: recipe list for which the rule needs to removed
4513 * @f_entry: rule entry containing filter information
4515 static enum ice_status
4516 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4517 struct ice_fltr_list_entry *f_entry)
4519 struct ice_fltr_mgmt_list_entry *list_elem;
4520 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4521 enum ice_status status = ICE_SUCCESS;
4522 bool remove_rule = false;
4525 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4526 return ICE_ERR_PARAM;
4527 f_entry->fltr_info.fwd_id.hw_vsi_id =
4528 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4530 rule_lock = &recp_list->filt_rule_lock;
4531 ice_acquire_lock(rule_lock);
4532 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4533 &f_entry->fltr_info);
4535 status = ICE_ERR_DOES_NOT_EXIST;
4539 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4541 } else if (!list_elem->vsi_list_info) {
4542 status = ICE_ERR_DOES_NOT_EXIST;
4544 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4545 /* a ref_cnt > 1 indicates that the vsi_list is being
4546 * shared by multiple rules. Decrement the ref_cnt and
4547 * remove this rule, but do not modify the list, as it
4548 * is in-use by other rules.
4550 list_elem->vsi_list_info->ref_cnt--;
4553 /* a ref_cnt of 1 indicates the vsi_list is only used
4554 * by one rule. However, the original removal request is only
4555 * for a single VSI. Update the vsi_list first, and only
4556 * remove the rule if there are no further VSIs in this list.
4558 vsi_handle = f_entry->fltr_info.vsi_handle;
4559 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4562 /* if VSI count goes to zero after updating the VSI list */
4563 if (list_elem->vsi_count == 0)
4568 /* Remove the lookup rule */
4569 struct ice_aqc_sw_rules_elem *s_rule;
4571 s_rule = (struct ice_aqc_sw_rules_elem *)
4572 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4574 status = ICE_ERR_NO_MEMORY;
4578 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4579 ice_aqc_opc_remove_sw_rules);
4581 status = ice_aq_sw_rules(hw, s_rule,
4582 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4583 ice_aqc_opc_remove_sw_rules, NULL);
4585 /* Remove a book keeping from the list */
4586 ice_free(hw, s_rule);
4591 LIST_DEL(&list_elem->list_entry);
4592 ice_free(hw, list_elem);
4595 ice_release_lock(rule_lock);
4600 * ice_aq_get_res_alloc - get allocated resources
4601 * @hw: pointer to the HW struct
4602 * @num_entries: pointer to u16 to store the number of resource entries returned
4603 * @buf: pointer to buffer
4604 * @buf_size: size of buf
4605 * @cd: pointer to command details structure or NULL
4607 * The caller-supplied buffer must be large enough to store the resource
4608 * information for all resource types. Each resource type is an
4609 * ice_aqc_get_res_resp_elem structure.
4612 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4613 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4614 struct ice_sq_cd *cd)
4616 struct ice_aqc_get_res_alloc *resp;
4617 enum ice_status status;
4618 struct ice_aq_desc desc;
4621 return ICE_ERR_BAD_PTR;
4623 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4624 return ICE_ERR_INVAL_SIZE;
4626 resp = &desc.params.get_res;
4628 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4629 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4631 if (!status && num_entries)
4632 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4638 * ice_aq_get_res_descs - get allocated resource descriptors
4639 * @hw: pointer to the hardware structure
4640 * @num_entries: number of resource entries in buffer
4641 * @buf: structure to hold response data buffer
4642 * @buf_size: size of buffer
4643 * @res_type: resource type
4644 * @res_shared: is resource shared
4645 * @desc_id: input - first desc ID to start; output - next desc ID
4646 * @cd: pointer to command details structure or NULL
4649 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4650 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4651 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4653 struct ice_aqc_get_allocd_res_desc *cmd;
4654 struct ice_aq_desc desc;
4655 enum ice_status status;
4657 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4659 cmd = &desc.params.get_res_desc;
4662 return ICE_ERR_PARAM;
4664 if (buf_size != (num_entries * sizeof(*buf)))
4665 return ICE_ERR_PARAM;
4667 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4669 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4670 ICE_AQC_RES_TYPE_M) | (res_shared ?
4671 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4672 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4674 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4676 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4682 * ice_add_mac_rule - Add a MAC address based filter rule
4683 * @hw: pointer to the hardware structure
4684 * @m_list: list of MAC addresses and forwarding information
4685 * @sw: pointer to switch info struct for which function add rule
4686 * @lport: logic port number on which function add rule
4688 * IMPORTANT: When the umac_shared flag is set to false and m_list has
4689 * multiple unicast addresses, the function assumes that all the
4690 * addresses are unique in a given add_mac call. It doesn't
4691 * check for duplicates in this case, removing duplicates from a given
4692 * list should be taken care of in the caller of this function.
4694 static enum ice_status
4695 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4696 struct ice_switch_info *sw, u8 lport)
4698 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4699 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4700 struct ice_fltr_list_entry *m_list_itr;
4701 struct LIST_HEAD_TYPE *rule_head;
4702 u16 total_elem_left, s_rule_size;
4703 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4704 enum ice_status status = ICE_SUCCESS;
4705 u16 num_unicast = 0;
4709 rule_lock = &recp_list->filt_rule_lock;
4710 rule_head = &recp_list->filt_rules;
4712 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4714 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4718 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4719 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4720 if (!ice_is_vsi_valid(hw, vsi_handle))
4721 return ICE_ERR_PARAM;
4722 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4723 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4724 /* update the src in case it is VSI num */
4725 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4726 return ICE_ERR_PARAM;
4727 m_list_itr->fltr_info.src = hw_vsi_id;
4728 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4729 IS_ZERO_ETHER_ADDR(add))
4730 return ICE_ERR_PARAM;
4731 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
4732 /* Don't overwrite the unicast address */
4733 ice_acquire_lock(rule_lock);
4734 if (ice_find_rule_entry(rule_head,
4735 &m_list_itr->fltr_info)) {
4736 ice_release_lock(rule_lock);
4739 ice_release_lock(rule_lock);
4741 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4742 (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
4743 m_list_itr->status =
4744 ice_add_rule_internal(hw, recp_list, lport,
4746 if (m_list_itr->status)
4747 return m_list_itr->status;
4751 ice_acquire_lock(rule_lock);
4752 /* Exit if no suitable entries were found for adding bulk switch rule */
4754 status = ICE_SUCCESS;
4755 goto ice_add_mac_exit;
4758 /* Allocate switch rule buffer for the bulk update for unicast */
4759 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4760 s_rule = (struct ice_aqc_sw_rules_elem *)
4761 ice_calloc(hw, num_unicast, s_rule_size);
4763 status = ICE_ERR_NO_MEMORY;
4764 goto ice_add_mac_exit;
4768 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4770 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4771 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4773 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4774 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4775 ice_aqc_opc_add_sw_rules);
4776 r_iter = (struct ice_aqc_sw_rules_elem *)
4777 ((u8 *)r_iter + s_rule_size);
4781 /* Call AQ bulk switch rule update for all unicast addresses */
4783 /* Call AQ switch rule in AQ_MAX chunk */
4784 for (total_elem_left = num_unicast; total_elem_left > 0;
4785 total_elem_left -= elem_sent) {
4786 struct ice_aqc_sw_rules_elem *entry = r_iter;
4788 elem_sent = MIN_T(u8, total_elem_left,
4789 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4790 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4791 elem_sent, ice_aqc_opc_add_sw_rules,
4794 goto ice_add_mac_exit;
4795 r_iter = (struct ice_aqc_sw_rules_elem *)
4796 ((u8 *)r_iter + (elem_sent * s_rule_size));
4799 /* Fill up rule ID based on the value returned from FW */
4801 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4803 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4804 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4805 struct ice_fltr_mgmt_list_entry *fm_entry;
4807 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4808 f_info->fltr_rule_id =
4809 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4810 f_info->fltr_act = ICE_FWD_TO_VSI;
4811 /* Create an entry to track this MAC address */
4812 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4813 ice_malloc(hw, sizeof(*fm_entry));
4815 status = ICE_ERR_NO_MEMORY;
4816 goto ice_add_mac_exit;
4818 fm_entry->fltr_info = *f_info;
4819 fm_entry->vsi_count = 1;
4820 /* The book keeping entries will get removed when
4821 * base driver calls remove filter AQ command
4824 LIST_ADD(&fm_entry->list_entry, rule_head);
4825 r_iter = (struct ice_aqc_sw_rules_elem *)
4826 ((u8 *)r_iter + s_rule_size);
4831 ice_release_lock(rule_lock);
4833 ice_free(hw, s_rule);
4838 * ice_add_mac - Add a MAC address based filter rule
4839 * @hw: pointer to the hardware structure
4840 * @m_list: list of MAC addresses and forwarding information
4842 * Function add MAC rule for logical port from HW struct
4844 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4847 return ICE_ERR_PARAM;
4849 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4850 hw->port_info->lport);
4854 * ice_add_vlan_internal - Add one VLAN based filter rule
4855 * @hw: pointer to the hardware structure
4856 * @recp_list: recipe list for which rule has to be added
4857 * @f_entry: filter entry containing one VLAN information
4859 static enum ice_status
4860 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4861 struct ice_fltr_list_entry *f_entry)
4863 struct ice_fltr_mgmt_list_entry *v_list_itr;
4864 struct ice_fltr_info *new_fltr, *cur_fltr;
4865 enum ice_sw_lkup_type lkup_type;
4866 u16 vsi_list_id = 0, vsi_handle;
4867 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4868 enum ice_status status = ICE_SUCCESS;
4870 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4871 return ICE_ERR_PARAM;
4873 f_entry->fltr_info.fwd_id.hw_vsi_id =
4874 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4875 new_fltr = &f_entry->fltr_info;
4877 /* VLAN ID should only be 12 bits */
4878 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4879 return ICE_ERR_PARAM;
4881 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4882 return ICE_ERR_PARAM;
4884 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4885 lkup_type = new_fltr->lkup_type;
4886 vsi_handle = new_fltr->vsi_handle;
4887 rule_lock = &recp_list->filt_rule_lock;
4888 ice_acquire_lock(rule_lock);
4889 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4891 struct ice_vsi_list_map_info *map_info = NULL;
4893 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4894 /* All VLAN pruning rules use a VSI list. Check if
4895 * there is already a VSI list containing VSI that we
4896 * want to add. If found, use the same vsi_list_id for
4897 * this new VLAN rule or else create a new list.
4899 map_info = ice_find_vsi_list_entry(recp_list,
4903 status = ice_create_vsi_list_rule(hw,
4911 /* Convert the action to forwarding to a VSI list. */
4912 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4913 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4916 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4918 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4921 status = ICE_ERR_DOES_NOT_EXIST;
4924 /* reuse VSI list for new rule and increment ref_cnt */
4926 v_list_itr->vsi_list_info = map_info;
4927 map_info->ref_cnt++;
4929 v_list_itr->vsi_list_info =
4930 ice_create_vsi_list_map(hw, &vsi_handle,
4934 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4935 /* Update existing VSI list to add new VSI ID only if it used
4938 cur_fltr = &v_list_itr->fltr_info;
4939 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4942 /* If VLAN rule exists and VSI list being used by this rule is
4943 * referenced by more than 1 VLAN rule. Then create a new VSI
4944 * list appending previous VSI with new VSI and update existing
4945 * VLAN rule to point to new VSI list ID
4947 struct ice_fltr_info tmp_fltr;
4948 u16 vsi_handle_arr[2];
4951 /* Current implementation only supports reusing VSI list with
4952 * one VSI count. We should never hit below condition
4954 if (v_list_itr->vsi_count > 1 &&
4955 v_list_itr->vsi_list_info->ref_cnt > 1) {
4956 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4957 status = ICE_ERR_CFG;
4962 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4965 /* A rule already exists with the new VSI being added */
4966 if (cur_handle == vsi_handle) {
4967 status = ICE_ERR_ALREADY_EXISTS;
4971 vsi_handle_arr[0] = cur_handle;
4972 vsi_handle_arr[1] = vsi_handle;
4973 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4974 &vsi_list_id, lkup_type);
4978 tmp_fltr = v_list_itr->fltr_info;
4979 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4980 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4981 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4982 /* Update the previous switch rule to a new VSI list which
4983 * includes current VSI that is requested
4985 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4989 /* before overriding VSI list map info. decrement ref_cnt of
4992 v_list_itr->vsi_list_info->ref_cnt--;
4994 /* now update to newly created list */
4995 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4996 v_list_itr->vsi_list_info =
4997 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4999 v_list_itr->vsi_count++;
5003 ice_release_lock(rule_lock);
5008 * ice_add_vlan_rule - Add VLAN based filter rule
5009 * @hw: pointer to the hardware structure
5010 * @v_list: list of VLAN entries and forwarding information
5011 * @sw: pointer to switch info struct for which function add rule
5013 static enum ice_status
5014 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5015 struct ice_switch_info *sw)
5017 struct ice_fltr_list_entry *v_list_itr;
5018 struct ice_sw_recipe *recp_list;
5020 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5021 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5023 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5024 return ICE_ERR_PARAM;
5025 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5026 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5028 if (v_list_itr->status)
5029 return v_list_itr->status;
5035 * ice_add_vlan - Add a VLAN based filter rule
5036 * @hw: pointer to the hardware structure
5037 * @v_list: list of VLAN and forwarding information
5039 * Function add VLAN rule for logical port from HW struct
5041 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5044 return ICE_ERR_PARAM;
5046 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5050 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5051 * @hw: pointer to the hardware structure
5052 * @mv_list: list of MAC and VLAN filters
5053 * @sw: pointer to switch info struct for which function add rule
5054 * @lport: logic port number on which function add rule
5056 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5057 * pruning bits enabled, then it is the responsibility of the caller to make
5058 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5059 * VLAN won't be received on that VSI otherwise.
5061 static enum ice_status
5062 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5063 struct ice_switch_info *sw, u8 lport)
5065 struct ice_fltr_list_entry *mv_list_itr;
5066 struct ice_sw_recipe *recp_list;
5068 if (!mv_list || !hw)
5069 return ICE_ERR_PARAM;
5071 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5072 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5074 enum ice_sw_lkup_type l_type =
5075 mv_list_itr->fltr_info.lkup_type;
5077 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5078 return ICE_ERR_PARAM;
5079 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5080 mv_list_itr->status =
5081 ice_add_rule_internal(hw, recp_list, lport,
5083 if (mv_list_itr->status)
5084 return mv_list_itr->status;
5090 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5091 * @hw: pointer to the hardware structure
5092 * @mv_list: list of MAC VLAN addresses and forwarding information
5094 * Function add MAC VLAN rule for logical port from HW struct
5097 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5099 if (!mv_list || !hw)
5100 return ICE_ERR_PARAM;
5102 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5103 hw->port_info->lport);
5107 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5108 * @hw: pointer to the hardware structure
5109 * @em_list: list of ether type MAC filter, MAC is optional
5110 * @sw: pointer to switch info struct for which function add rule
5111 * @lport: logic port number on which function add rule
5113 * This function requires the caller to populate the entries in
5114 * the filter list with the necessary fields (including flags to
5115 * indicate Tx or Rx rules).
5117 static enum ice_status
5118 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5119 struct ice_switch_info *sw, u8 lport)
5121 struct ice_fltr_list_entry *em_list_itr;
5123 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5125 struct ice_sw_recipe *recp_list;
5126 enum ice_sw_lkup_type l_type;
5128 l_type = em_list_itr->fltr_info.lkup_type;
5129 recp_list = &sw->recp_list[l_type];
5131 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5132 l_type != ICE_SW_LKUP_ETHERTYPE)
5133 return ICE_ERR_PARAM;
5135 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5138 if (em_list_itr->status)
5139 return em_list_itr->status;
5145 * ice_add_eth_mac - Add a ethertype based filter rule
5146 * @hw: pointer to the hardware structure
5147 * @em_list: list of ethertype and forwarding information
5149 * Function add ethertype rule for logical port from HW struct
5152 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5154 if (!em_list || !hw)
5155 return ICE_ERR_PARAM;
5157 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5158 hw->port_info->lport);
5162 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5163 * @hw: pointer to the hardware structure
5164 * @em_list: list of ethertype or ethertype MAC entries
5165 * @sw: pointer to switch info struct for which function add rule
5167 static enum ice_status
5168 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5169 struct ice_switch_info *sw)
5171 struct ice_fltr_list_entry *em_list_itr, *tmp;
5173 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5175 struct ice_sw_recipe *recp_list;
5176 enum ice_sw_lkup_type l_type;
5178 l_type = em_list_itr->fltr_info.lkup_type;
5180 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5181 l_type != ICE_SW_LKUP_ETHERTYPE)
5182 return ICE_ERR_PARAM;
5184 recp_list = &sw->recp_list[l_type];
5185 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5187 if (em_list_itr->status)
5188 return em_list_itr->status;
5194 * ice_remove_eth_mac - remove a ethertype based filter rule
5195 * @hw: pointer to the hardware structure
5196 * @em_list: list of ethertype and forwarding information
5200 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5202 if (!em_list || !hw)
5203 return ICE_ERR_PARAM;
5205 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5209 * ice_rem_sw_rule_info
5210 * @hw: pointer to the hardware structure
5211 * @rule_head: pointer to the switch list structure that we want to delete
5214 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5216 if (!LIST_EMPTY(rule_head)) {
5217 struct ice_fltr_mgmt_list_entry *entry;
5218 struct ice_fltr_mgmt_list_entry *tmp;
5220 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5221 ice_fltr_mgmt_list_entry, list_entry) {
5222 LIST_DEL(&entry->list_entry);
5223 ice_free(hw, entry);
5229 * ice_rem_adv_rule_info
5230 * @hw: pointer to the hardware structure
5231 * @rule_head: pointer to the switch list structure that we want to delete
5234 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5236 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5237 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5239 if (LIST_EMPTY(rule_head))
5242 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5243 ice_adv_fltr_mgmt_list_entry, list_entry) {
5244 LIST_DEL(&lst_itr->list_entry);
5245 ice_free(hw, lst_itr->lkups);
5246 ice_free(hw, lst_itr);
5251 * ice_rem_all_sw_rules_info
5252 * @hw: pointer to the hardware structure
5254 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5256 struct ice_switch_info *sw = hw->switch_info;
5259 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5260 struct LIST_HEAD_TYPE *rule_head;
5262 rule_head = &sw->recp_list[i].filt_rules;
5263 if (!sw->recp_list[i].adv_rule)
5264 ice_rem_sw_rule_info(hw, rule_head);
5266 ice_rem_adv_rule_info(hw, rule_head);
5267 if (sw->recp_list[i].adv_rule &&
5268 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5269 sw->recp_list[i].adv_rule = false;
5274 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5275 * @pi: pointer to the port_info structure
5276 * @vsi_handle: VSI handle to set as default
5277 * @set: true to add the above mentioned switch rule, false to remove it
5278 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5280 * add filter rule to set/unset given VSI as default VSI for the switch
5281 * (represented by swid)
5284 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5287 struct ice_aqc_sw_rules_elem *s_rule;
5288 struct ice_fltr_info f_info;
5289 struct ice_hw *hw = pi->hw;
5290 enum ice_adminq_opc opcode;
5291 enum ice_status status;
5295 if (!ice_is_vsi_valid(hw, vsi_handle))
5296 return ICE_ERR_PARAM;
5297 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5299 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5300 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5302 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5304 return ICE_ERR_NO_MEMORY;
5306 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5308 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5309 f_info.flag = direction;
5310 f_info.fltr_act = ICE_FWD_TO_VSI;
5311 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5313 if (f_info.flag & ICE_FLTR_RX) {
5314 f_info.src = pi->lport;
5315 f_info.src_id = ICE_SRC_ID_LPORT;
5317 f_info.fltr_rule_id =
5318 pi->dflt_rx_vsi_rule_id;
5319 } else if (f_info.flag & ICE_FLTR_TX) {
5320 f_info.src_id = ICE_SRC_ID_VSI;
5321 f_info.src = hw_vsi_id;
5323 f_info.fltr_rule_id =
5324 pi->dflt_tx_vsi_rule_id;
5328 opcode = ice_aqc_opc_add_sw_rules;
5330 opcode = ice_aqc_opc_remove_sw_rules;
5332 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5334 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5335 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5338 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5340 if (f_info.flag & ICE_FLTR_TX) {
5341 pi->dflt_tx_vsi_num = hw_vsi_id;
5342 pi->dflt_tx_vsi_rule_id = index;
5343 } else if (f_info.flag & ICE_FLTR_RX) {
5344 pi->dflt_rx_vsi_num = hw_vsi_id;
5345 pi->dflt_rx_vsi_rule_id = index;
5348 if (f_info.flag & ICE_FLTR_TX) {
5349 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5350 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5351 } else if (f_info.flag & ICE_FLTR_RX) {
5352 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5353 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5358 ice_free(hw, s_rule);
5363 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5364 * @list_head: head of rule list
5365 * @f_info: rule information
5367 * Helper function to search for a unicast rule entry - this is to be used
5368 * to remove unicast MAC filter that is not shared with other VSIs on the
5371 * Returns pointer to entry storing the rule if found
5373 static struct ice_fltr_mgmt_list_entry *
5374 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5375 struct ice_fltr_info *f_info)
5377 struct ice_fltr_mgmt_list_entry *list_itr;
5379 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5381 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5382 sizeof(f_info->l_data)) &&
5383 f_info->fwd_id.hw_vsi_id ==
5384 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5385 f_info->flag == list_itr->fltr_info.flag)
5392 * ice_remove_mac_rule - remove a MAC based filter rule
5393 * @hw: pointer to the hardware structure
5394 * @m_list: list of MAC addresses and forwarding information
5395 * @recp_list: list from which function remove MAC address
5397 * This function removes either a MAC filter rule or a specific VSI from a
5398 * VSI list for a multicast MAC address.
5400 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5401 * ice_add_mac. Caller should be aware that this call will only work if all
5402 * the entries passed into m_list were added previously. It will not attempt to
5403 * do a partial remove of entries that were found.
5405 static enum ice_status
5406 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5407 struct ice_sw_recipe *recp_list)
5409 struct ice_fltr_list_entry *list_itr, *tmp;
5410 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5413 return ICE_ERR_PARAM;
5415 rule_lock = &recp_list->filt_rule_lock;
5416 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5418 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5419 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5422 if (l_type != ICE_SW_LKUP_MAC)
5423 return ICE_ERR_PARAM;
5425 vsi_handle = list_itr->fltr_info.vsi_handle;
5426 if (!ice_is_vsi_valid(hw, vsi_handle))
5427 return ICE_ERR_PARAM;
5429 list_itr->fltr_info.fwd_id.hw_vsi_id =
5430 ice_get_hw_vsi_num(hw, vsi_handle);
5431 if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
5432 /* Don't remove the unicast address that belongs to
5433 * another VSI on the switch, since it is not being
5436 ice_acquire_lock(rule_lock);
5437 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5438 &list_itr->fltr_info)) {
5439 ice_release_lock(rule_lock);
5440 return ICE_ERR_DOES_NOT_EXIST;
5442 ice_release_lock(rule_lock);
5444 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5446 if (list_itr->status)
5447 return list_itr->status;
5453 * ice_remove_mac - remove a MAC address based filter rule
5454 * @hw: pointer to the hardware structure
5455 * @m_list: list of MAC addresses and forwarding information
5458 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5460 struct ice_sw_recipe *recp_list;
5462 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5463 return ice_remove_mac_rule(hw, m_list, recp_list);
5467 * ice_remove_vlan_rule - Remove VLAN based filter rule
5468 * @hw: pointer to the hardware structure
5469 * @v_list: list of VLAN entries and forwarding information
5470 * @recp_list: list from which function remove VLAN
5472 static enum ice_status
5473 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5474 struct ice_sw_recipe *recp_list)
5476 struct ice_fltr_list_entry *v_list_itr, *tmp;
5478 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5480 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5482 if (l_type != ICE_SW_LKUP_VLAN)
5483 return ICE_ERR_PARAM;
5484 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5486 if (v_list_itr->status)
5487 return v_list_itr->status;
5493 * ice_remove_vlan - remove a VLAN address based filter rule
5494 * @hw: pointer to the hardware structure
5495 * @v_list: list of VLAN and forwarding information
5499 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5501 struct ice_sw_recipe *recp_list;
5504 return ICE_ERR_PARAM;
5506 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5507 return ice_remove_vlan_rule(hw, v_list, recp_list);
5511 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5512 * @hw: pointer to the hardware structure
5513 * @v_list: list of MAC VLAN entries and forwarding information
5514 * @recp_list: list from which function remove MAC VLAN
5516 static enum ice_status
5517 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5518 struct ice_sw_recipe *recp_list)
5520 struct ice_fltr_list_entry *v_list_itr, *tmp;
5522 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5523 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5525 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5527 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5528 return ICE_ERR_PARAM;
5529 v_list_itr->status =
5530 ice_remove_rule_internal(hw, recp_list,
5532 if (v_list_itr->status)
5533 return v_list_itr->status;
5539 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5540 * @hw: pointer to the hardware structure
5541 * @mv_list: list of MAC VLAN and forwarding information
5544 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5546 struct ice_sw_recipe *recp_list;
5548 if (!mv_list || !hw)
5549 return ICE_ERR_PARAM;
5551 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5552 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5556 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5557 * @fm_entry: filter entry to inspect
5558 * @vsi_handle: VSI handle to compare with filter info
5561 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5563 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5564 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5565 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5566 fm_entry->vsi_list_info &&
5567 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5572 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5573 * @hw: pointer to the hardware structure
5574 * @vsi_handle: VSI handle to remove filters from
5575 * @vsi_list_head: pointer to the list to add entry to
5576 * @fi: pointer to fltr_info of filter entry to copy & add
5578 * Helper function, used when creating a list of filters to remove from
5579 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5580 * original filter entry, with the exception of fltr_info.fltr_act and
5581 * fltr_info.fwd_id fields. These are set such that later logic can
5582 * extract which VSI to remove the fltr from, and pass on that information.
5584 static enum ice_status
5585 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5586 struct LIST_HEAD_TYPE *vsi_list_head,
5587 struct ice_fltr_info *fi)
5589 struct ice_fltr_list_entry *tmp;
5591 /* this memory is freed up in the caller function
5592 * once filters for this VSI are removed
5594 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5596 return ICE_ERR_NO_MEMORY;
5598 tmp->fltr_info = *fi;
5600 /* Overwrite these fields to indicate which VSI to remove filter from,
5601 * so find and remove logic can extract the information from the
5602 * list entries. Note that original entries will still have proper
5605 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5606 tmp->fltr_info.vsi_handle = vsi_handle;
5607 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5609 LIST_ADD(&tmp->list_entry, vsi_list_head);
5615 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5616 * @hw: pointer to the hardware structure
5617 * @vsi_handle: VSI handle to remove filters from
5618 * @lkup_list_head: pointer to the list that has certain lookup type filters
5619 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5621 * Locates all filters in lkup_list_head that are used by the given VSI,
5622 * and adds COPIES of those entries to vsi_list_head (intended to be used
5623 * to remove the listed filters).
5624 * Note that this means all entries in vsi_list_head must be explicitly
5625 * deallocated by the caller when done with list.
5627 static enum ice_status
5628 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5629 struct LIST_HEAD_TYPE *lkup_list_head,
5630 struct LIST_HEAD_TYPE *vsi_list_head)
5632 struct ice_fltr_mgmt_list_entry *fm_entry;
5633 enum ice_status status = ICE_SUCCESS;
5635 /* check to make sure VSI ID is valid and within boundary */
5636 if (!ice_is_vsi_valid(hw, vsi_handle))
5637 return ICE_ERR_PARAM;
5639 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5640 ice_fltr_mgmt_list_entry, list_entry) {
5641 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5644 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5646 &fm_entry->fltr_info);
5654 * ice_determine_promisc_mask
5655 * @fi: filter info to parse
5657 * Helper function to determine which ICE_PROMISC_ mask corresponds
5658 * to given filter into.
5660 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5662 u16 vid = fi->l_data.mac_vlan.vlan_id;
5663 u8 *macaddr = fi->l_data.mac.mac_addr;
5664 bool is_tx_fltr = false;
5665 u8 promisc_mask = 0;
5667 if (fi->flag == ICE_FLTR_TX)
5670 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5671 promisc_mask |= is_tx_fltr ?
5672 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5673 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5674 promisc_mask |= is_tx_fltr ?
5675 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5676 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5677 promisc_mask |= is_tx_fltr ?
5678 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5680 promisc_mask |= is_tx_fltr ?
5681 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5683 return promisc_mask;
5687 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5688 * @hw: pointer to the hardware structure
5689 * @vsi_handle: VSI handle to retrieve info from
5690 * @promisc_mask: pointer to mask to be filled in
5691 * @vid: VLAN ID of promisc VLAN VSI
5692 * @sw: pointer to switch info struct for which function add rule
5694 static enum ice_status
5695 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5696 u16 *vid, struct ice_switch_info *sw)
5698 struct ice_fltr_mgmt_list_entry *itr;
5699 struct LIST_HEAD_TYPE *rule_head;
5700 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5702 if (!ice_is_vsi_valid(hw, vsi_handle))
5703 return ICE_ERR_PARAM;
5707 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5708 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5710 ice_acquire_lock(rule_lock);
5711 LIST_FOR_EACH_ENTRY(itr, rule_head,
5712 ice_fltr_mgmt_list_entry, list_entry) {
5713 /* Continue if this filter doesn't apply to this VSI or the
5714 * VSI ID is not in the VSI map for this filter
5716 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5719 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5721 ice_release_lock(rule_lock);
5727 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5728 * @hw: pointer to the hardware structure
5729 * @vsi_handle: VSI handle to retrieve info from
5730 * @promisc_mask: pointer to mask to be filled in
5731 * @vid: VLAN ID of promisc VLAN VSI
5734 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5737 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5738 vid, hw->switch_info);
5742 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5743 * @hw: pointer to the hardware structure
5744 * @vsi_handle: VSI handle to retrieve info from
5745 * @promisc_mask: pointer to mask to be filled in
5746 * @vid: VLAN ID of promisc VLAN VSI
5747 * @sw: pointer to switch info struct for which function add rule
5749 static enum ice_status
5750 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5751 u16 *vid, struct ice_switch_info *sw)
5753 struct ice_fltr_mgmt_list_entry *itr;
5754 struct LIST_HEAD_TYPE *rule_head;
5755 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5757 if (!ice_is_vsi_valid(hw, vsi_handle))
5758 return ICE_ERR_PARAM;
5762 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5763 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5765 ice_acquire_lock(rule_lock);
5766 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5768 /* Continue if this filter doesn't apply to this VSI or the
5769 * VSI ID is not in the VSI map for this filter
5771 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5774 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5776 ice_release_lock(rule_lock);
5782 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5783 * @hw: pointer to the hardware structure
5784 * @vsi_handle: VSI handle to retrieve info from
5785 * @promisc_mask: pointer to mask to be filled in
5786 * @vid: VLAN ID of promisc VLAN VSI
5789 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5792 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5793 vid, hw->switch_info);
5797 * ice_remove_promisc - Remove promisc based filter rules
5798 * @hw: pointer to the hardware structure
5799 * @recp_id: recipe ID for which the rule needs to removed
5800 * @v_list: list of promisc entries
5802 static enum ice_status
5803 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5804 struct LIST_HEAD_TYPE *v_list)
5806 struct ice_fltr_list_entry *v_list_itr, *tmp;
5807 struct ice_sw_recipe *recp_list;
5809 recp_list = &hw->switch_info->recp_list[recp_id];
5810 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5812 v_list_itr->status =
5813 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5814 if (v_list_itr->status)
5815 return v_list_itr->status;
5821 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5822 * @hw: pointer to the hardware structure
5823 * @vsi_handle: VSI handle to clear mode
5824 * @promisc_mask: mask of promiscuous config bits to clear
5825 * @vid: VLAN ID to clear VLAN promiscuous
5826 * @sw: pointer to switch info struct for which function add rule
5828 static enum ice_status
5829 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5830 u16 vid, struct ice_switch_info *sw)
5832 struct ice_fltr_list_entry *fm_entry, *tmp;
5833 struct LIST_HEAD_TYPE remove_list_head;
5834 struct ice_fltr_mgmt_list_entry *itr;
5835 struct LIST_HEAD_TYPE *rule_head;
5836 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5837 enum ice_status status = ICE_SUCCESS;
5840 if (!ice_is_vsi_valid(hw, vsi_handle))
5841 return ICE_ERR_PARAM;
5843 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5844 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5846 recipe_id = ICE_SW_LKUP_PROMISC;
5848 rule_head = &sw->recp_list[recipe_id].filt_rules;
5849 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5851 INIT_LIST_HEAD(&remove_list_head);
5853 ice_acquire_lock(rule_lock);
5854 LIST_FOR_EACH_ENTRY(itr, rule_head,
5855 ice_fltr_mgmt_list_entry, list_entry) {
5856 struct ice_fltr_info *fltr_info;
5857 u8 fltr_promisc_mask = 0;
5859 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5861 fltr_info = &itr->fltr_info;
5863 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5864 vid != fltr_info->l_data.mac_vlan.vlan_id)
5867 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5869 /* Skip if filter is not completely specified by given mask */
5870 if (fltr_promisc_mask & ~promisc_mask)
5873 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5877 ice_release_lock(rule_lock);
5878 goto free_fltr_list;
5881 ice_release_lock(rule_lock);
5883 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5886 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5887 ice_fltr_list_entry, list_entry) {
5888 LIST_DEL(&fm_entry->list_entry);
5889 ice_free(hw, fm_entry);
5896 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5897 * @hw: pointer to the hardware structure
5898 * @vsi_handle: VSI handle to clear mode
5899 * @promisc_mask: mask of promiscuous config bits to clear
5900 * @vid: VLAN ID to clear VLAN promiscuous
5903 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5904 u8 promisc_mask, u16 vid)
5906 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5907 vid, hw->switch_info);
5911 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5912 * @hw: pointer to the hardware structure
5913 * @vsi_handle: VSI handle to configure
5914 * @promisc_mask: mask of promiscuous config bits
5915 * @vid: VLAN ID to set VLAN promiscuous
5916 * @lport: logical port number to configure promisc mode
5917 * @sw: pointer to switch info struct for which function add rule
5919 static enum ice_status
5920 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5921 u16 vid, u8 lport, struct ice_switch_info *sw)
5923 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5924 struct ice_fltr_list_entry f_list_entry;
5925 struct ice_fltr_info new_fltr;
5926 enum ice_status status = ICE_SUCCESS;
5932 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5934 if (!ice_is_vsi_valid(hw, vsi_handle))
5935 return ICE_ERR_PARAM;
5936 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5938 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5940 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5941 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5942 new_fltr.l_data.mac_vlan.vlan_id = vid;
5943 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5945 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5946 recipe_id = ICE_SW_LKUP_PROMISC;
5949 /* Separate filters must be set for each direction/packet type
5950 * combination, so we will loop over the mask value, store the
5951 * individual type, and clear it out in the input mask as it
5954 while (promisc_mask) {
5955 struct ice_sw_recipe *recp_list;
5961 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5962 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5963 pkt_type = UCAST_FLTR;
5964 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5965 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5966 pkt_type = UCAST_FLTR;
5968 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5969 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5970 pkt_type = MCAST_FLTR;
5971 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5972 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5973 pkt_type = MCAST_FLTR;
5975 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5976 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5977 pkt_type = BCAST_FLTR;
5978 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5979 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5980 pkt_type = BCAST_FLTR;
5984 /* Check for VLAN promiscuous flag */
5985 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5986 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5987 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5988 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5992 /* Set filter DA based on packet type */
5993 mac_addr = new_fltr.l_data.mac.mac_addr;
5994 if (pkt_type == BCAST_FLTR) {
5995 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5996 } else if (pkt_type == MCAST_FLTR ||
5997 pkt_type == UCAST_FLTR) {
5998 /* Use the dummy ether header DA */
5999 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
6000 ICE_NONDMA_TO_NONDMA);
6001 if (pkt_type == MCAST_FLTR)
6002 mac_addr[0] |= 0x1; /* Set multicast bit */
6005 /* Need to reset this to zero for all iterations */
6008 new_fltr.flag |= ICE_FLTR_TX;
6009 new_fltr.src = hw_vsi_id;
6011 new_fltr.flag |= ICE_FLTR_RX;
6012 new_fltr.src = lport;
6015 new_fltr.fltr_act = ICE_FWD_TO_VSI;
6016 new_fltr.vsi_handle = vsi_handle;
6017 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
6018 f_list_entry.fltr_info = new_fltr;
6019 recp_list = &sw->recp_list[recipe_id];
6021 status = ice_add_rule_internal(hw, recp_list, lport,
6023 if (status != ICE_SUCCESS)
6024 goto set_promisc_exit;
6032 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6033 * @hw: pointer to the hardware structure
6034 * @vsi_handle: VSI handle to configure
6035 * @promisc_mask: mask of promiscuous config bits
6036 * @vid: VLAN ID to set VLAN promiscuous
6039 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6042 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6043 hw->port_info->lport,
6048 * _ice_set_vlan_vsi_promisc
6049 * @hw: pointer to the hardware structure
6050 * @vsi_handle: VSI handle to configure
6051 * @promisc_mask: mask of promiscuous config bits
6052 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6053 * @lport: logical port number to configure promisc mode
6054 * @sw: pointer to switch info struct for which function add rule
6056 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6058 static enum ice_status
6059 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6060 bool rm_vlan_promisc, u8 lport,
6061 struct ice_switch_info *sw)
6063 struct ice_fltr_list_entry *list_itr, *tmp;
6064 struct LIST_HEAD_TYPE vsi_list_head;
6065 struct LIST_HEAD_TYPE *vlan_head;
6066 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6067 enum ice_status status;
6070 INIT_LIST_HEAD(&vsi_list_head);
6071 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6072 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6073 ice_acquire_lock(vlan_lock);
6074 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6076 ice_release_lock(vlan_lock);
6078 goto free_fltr_list;
6080 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6082 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6083 if (rm_vlan_promisc)
6084 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6088 status = _ice_set_vsi_promisc(hw, vsi_handle,
6089 promisc_mask, vlan_id,
6096 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6097 ice_fltr_list_entry, list_entry) {
6098 LIST_DEL(&list_itr->list_entry);
6099 ice_free(hw, list_itr);
6105 * ice_set_vlan_vsi_promisc
6106 * @hw: pointer to the hardware structure
6107 * @vsi_handle: VSI handle to configure
6108 * @promisc_mask: mask of promiscuous config bits
6109 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6111 * Configure VSI with all associated VLANs to given promiscuous mode(s)
6114 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6115 bool rm_vlan_promisc)
6117 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6118 rm_vlan_promisc, hw->port_info->lport,
6123 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6124 * @hw: pointer to the hardware structure
6125 * @vsi_handle: VSI handle to remove filters from
6126 * @recp_list: recipe list from which function remove fltr
6127 * @lkup: switch rule filter lookup type
6130 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6131 struct ice_sw_recipe *recp_list,
6132 enum ice_sw_lkup_type lkup)
6134 struct ice_fltr_list_entry *fm_entry;
6135 struct LIST_HEAD_TYPE remove_list_head;
6136 struct LIST_HEAD_TYPE *rule_head;
6137 struct ice_fltr_list_entry *tmp;
6138 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6139 enum ice_status status;
6141 INIT_LIST_HEAD(&remove_list_head);
6142 rule_lock = &recp_list[lkup].filt_rule_lock;
6143 rule_head = &recp_list[lkup].filt_rules;
6144 ice_acquire_lock(rule_lock);
6145 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6147 ice_release_lock(rule_lock);
6149 goto free_fltr_list;
6152 case ICE_SW_LKUP_MAC:
6153 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6155 case ICE_SW_LKUP_VLAN:
6156 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6158 case ICE_SW_LKUP_PROMISC:
6159 case ICE_SW_LKUP_PROMISC_VLAN:
6160 ice_remove_promisc(hw, lkup, &remove_list_head);
6162 case ICE_SW_LKUP_MAC_VLAN:
6163 ice_remove_mac_vlan(hw, &remove_list_head);
6165 case ICE_SW_LKUP_ETHERTYPE:
6166 case ICE_SW_LKUP_ETHERTYPE_MAC:
6167 ice_remove_eth_mac(hw, &remove_list_head);
6169 case ICE_SW_LKUP_DFLT:
6170 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6172 case ICE_SW_LKUP_LAST:
6173 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6178 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6179 ice_fltr_list_entry, list_entry) {
6180 LIST_DEL(&fm_entry->list_entry);
6181 ice_free(hw, fm_entry);
6186 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6187 * @hw: pointer to the hardware structure
6188 * @vsi_handle: VSI handle to remove filters from
6189 * @sw: pointer to switch info struct
6192 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6193 struct ice_switch_info *sw)
6195 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6197 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6198 sw->recp_list, ICE_SW_LKUP_MAC);
6199 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6200 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6201 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6202 sw->recp_list, ICE_SW_LKUP_PROMISC);
6203 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6204 sw->recp_list, ICE_SW_LKUP_VLAN);
6205 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6206 sw->recp_list, ICE_SW_LKUP_DFLT);
6207 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6208 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6209 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6210 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6211 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6212 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6216 * ice_remove_vsi_fltr - Remove all filters for a VSI
6217 * @hw: pointer to the hardware structure
6218 * @vsi_handle: VSI handle to remove filters from
6220 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6222 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6226 * ice_alloc_res_cntr - allocating resource counter
6227 * @hw: pointer to the hardware structure
6228 * @type: type of resource
6229 * @alloc_shared: if set it is shared else dedicated
6230 * @num_items: number of entries requested for FD resource type
6231 * @counter_id: counter index returned by AQ call
6234 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6237 struct ice_aqc_alloc_free_res_elem *buf;
6238 enum ice_status status;
6241 /* Allocate resource */
6242 buf_len = ice_struct_size(buf, elem, 1);
6243 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6245 return ICE_ERR_NO_MEMORY;
6247 buf->num_elems = CPU_TO_LE16(num_items);
6248 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6249 ICE_AQC_RES_TYPE_M) | alloc_shared);
6251 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6252 ice_aqc_opc_alloc_res, NULL);
6256 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6264 * ice_free_res_cntr - free resource counter
6265 * @hw: pointer to the hardware structure
6266 * @type: type of resource
6267 * @alloc_shared: if set it is shared else dedicated
6268 * @num_items: number of entries to be freed for FD resource type
6269 * @counter_id: counter ID resource which needs to be freed
6272 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6275 struct ice_aqc_alloc_free_res_elem *buf;
6276 enum ice_status status;
6280 buf_len = ice_struct_size(buf, elem, 1);
6281 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6283 return ICE_ERR_NO_MEMORY;
6285 buf->num_elems = CPU_TO_LE16(num_items);
6286 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6287 ICE_AQC_RES_TYPE_M) | alloc_shared);
6288 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6290 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6291 ice_aqc_opc_free_res, NULL);
6293 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6300 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6301 * @hw: pointer to the hardware structure
6302 * @counter_id: returns counter index
6304 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6306 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6307 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6312 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6313 * @hw: pointer to the hardware structure
6314 * @counter_id: counter index to be freed
6316 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6318 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6319 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6324 * ice_alloc_res_lg_act - add large action resource
6325 * @hw: pointer to the hardware structure
6326 * @l_id: large action ID to fill it in
6327 * @num_acts: number of actions to hold with a large action entry
6329 static enum ice_status
6330 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6332 struct ice_aqc_alloc_free_res_elem *sw_buf;
6333 enum ice_status status;
6336 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6337 return ICE_ERR_PARAM;
6339 /* Allocate resource for large action */
6340 buf_len = ice_struct_size(sw_buf, elem, 1);
6341 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6343 return ICE_ERR_NO_MEMORY;
6345 sw_buf->num_elems = CPU_TO_LE16(1);
6347 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6348 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6349 * If num_acts is greater than 2, then use
6350 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6351 * The num_acts cannot exceed 4. This was ensured at the
6352 * beginning of the function.
6355 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6356 else if (num_acts == 2)
6357 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6359 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6361 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6362 ice_aqc_opc_alloc_res, NULL);
6364 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6366 ice_free(hw, sw_buf);
6371 * ice_add_mac_with_sw_marker - add filter with sw marker
6372 * @hw: pointer to the hardware structure
6373 * @f_info: filter info structure containing the MAC filter information
6374 * @sw_marker: sw marker to tag the Rx descriptor with
6377 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6380 struct ice_fltr_mgmt_list_entry *m_entry;
6381 struct ice_fltr_list_entry fl_info;
6382 struct ice_sw_recipe *recp_list;
6383 struct LIST_HEAD_TYPE l_head;
6384 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6385 enum ice_status ret;
6389 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6390 return ICE_ERR_PARAM;
6392 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6393 return ICE_ERR_PARAM;
6395 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6396 return ICE_ERR_PARAM;
6398 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6399 return ICE_ERR_PARAM;
6400 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6402 /* Add filter if it doesn't exist so then the adding of large
6403 * action always results in update
6406 INIT_LIST_HEAD(&l_head);
6407 fl_info.fltr_info = *f_info;
6408 LIST_ADD(&fl_info.list_entry, &l_head);
6410 entry_exists = false;
6411 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6412 hw->port_info->lport);
6413 if (ret == ICE_ERR_ALREADY_EXISTS)
6414 entry_exists = true;
6418 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6419 rule_lock = &recp_list->filt_rule_lock;
6420 ice_acquire_lock(rule_lock);
6421 /* Get the book keeping entry for the filter */
6422 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6426 /* If counter action was enabled for this rule then don't enable
6427 * sw marker large action
6429 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6430 ret = ICE_ERR_PARAM;
6434 /* if same marker was added before */
6435 if (m_entry->sw_marker_id == sw_marker) {
6436 ret = ICE_ERR_ALREADY_EXISTS;
6440 /* Allocate a hardware table entry to hold large act. Three actions
6441 * for marker based large action
6443 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6447 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6450 /* Update the switch rule to add the marker action */
6451 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6453 ice_release_lock(rule_lock);
6458 ice_release_lock(rule_lock);
6459 /* only remove entry if it did not exist previously */
6461 ret = ice_remove_mac(hw, &l_head);
6467 * ice_add_mac_with_counter - add filter with counter enabled
6468 * @hw: pointer to the hardware structure
6469 * @f_info: pointer to filter info structure containing the MAC filter
6473 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6475 struct ice_fltr_mgmt_list_entry *m_entry;
6476 struct ice_fltr_list_entry fl_info;
6477 struct ice_sw_recipe *recp_list;
6478 struct LIST_HEAD_TYPE l_head;
6479 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6480 enum ice_status ret;
6485 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6486 return ICE_ERR_PARAM;
6488 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6489 return ICE_ERR_PARAM;
6491 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6492 return ICE_ERR_PARAM;
6493 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6494 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6496 entry_exist = false;
6498 rule_lock = &recp_list->filt_rule_lock;
6500 /* Add filter if it doesn't exist so then the adding of large
6501 * action always results in update
6503 INIT_LIST_HEAD(&l_head);
6505 fl_info.fltr_info = *f_info;
6506 LIST_ADD(&fl_info.list_entry, &l_head);
6508 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6509 hw->port_info->lport);
6510 if (ret == ICE_ERR_ALREADY_EXISTS)
6515 ice_acquire_lock(rule_lock);
6516 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6518 ret = ICE_ERR_BAD_PTR;
6522 /* Don't enable counter for a filter for which sw marker was enabled */
6523 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6524 ret = ICE_ERR_PARAM;
6528 /* If a counter was already enabled then don't need to add again */
6529 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6530 ret = ICE_ERR_ALREADY_EXISTS;
6534 /* Allocate a hardware table entry to VLAN counter */
6535 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6539 /* Allocate a hardware table entry to hold large act. Two actions for
6540 * counter based large action
6542 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6546 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6549 /* Update the switch rule to add the counter action */
6550 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6552 ice_release_lock(rule_lock);
6557 ice_release_lock(rule_lock);
6558 /* only remove entry if it did not exist previously */
6560 ret = ice_remove_mac(hw, &l_head);
6565 /* This is mapping table entry that maps every word within a given protocol
6566 * structure to the real byte offset as per the specification of that
6568 * for example dst address is 3 words in ethertype header and corresponding
6569 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6570 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6571 * matching entry describing its field. This needs to be updated if new
6572 * structure is added to that union.
6574 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6575 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6576 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6577 { ICE_ETYPE_OL, { 0 } },
6578 { ICE_VLAN_OFOS, { 2, 0 } },
6579 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6580 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6581 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6582 26, 28, 30, 32, 34, 36, 38 } },
6583 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6584 26, 28, 30, 32, 34, 36, 38 } },
6585 { ICE_TCP_IL, { 0, 2 } },
6586 { ICE_UDP_OF, { 0, 2 } },
6587 { ICE_UDP_ILOS, { 0, 2 } },
6588 { ICE_SCTP_IL, { 0, 2 } },
6589 { ICE_VXLAN, { 8, 10, 12, 14 } },
6590 { ICE_GENEVE, { 8, 10, 12, 14 } },
6591 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6592 { ICE_NVGRE, { 0, 2, 4, 6 } },
6593 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6594 { ICE_PPPOE, { 0, 2, 4, 6 } },
6595 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6596 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6597 { ICE_ESP, { 0, 2, 4, 6 } },
6598 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6599 { ICE_NAT_T, { 8, 10, 12, 14 } },
6600 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6601 { ICE_VLAN_EX, { 2, 0 } },
6602 { ICE_VLAN_IN, { 2, 0 } },
6605 /* The following table describes preferred grouping of recipes.
6606 * If a recipe that needs to be programmed is a superset or matches one of the
6607 * following combinations, then the recipe needs to be chained as per the
6611 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6612 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6613 { ICE_MAC_IL, ICE_MAC_IL_HW },
6614 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6615 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6616 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6617 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6618 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6619 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6620 { ICE_TCP_IL, ICE_TCP_IL_HW },
6621 { ICE_UDP_OF, ICE_UDP_OF_HW },
6622 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6623 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
6624 { ICE_VXLAN, ICE_UDP_OF_HW },
6625 { ICE_GENEVE, ICE_UDP_OF_HW },
6626 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6627 { ICE_NVGRE, ICE_GRE_OF_HW },
6628 { ICE_GTP, ICE_UDP_OF_HW },
6629 { ICE_PPPOE, ICE_PPPOE_HW },
6630 { ICE_PFCP, ICE_UDP_ILOS_HW },
6631 { ICE_L2TPV3, ICE_L2TPV3_HW },
6632 { ICE_ESP, ICE_ESP_HW },
6633 { ICE_AH, ICE_AH_HW },
6634 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6635 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6636 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6637 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
6641 * ice_find_recp - find a recipe
6642 * @hw: pointer to the hardware structure
6643 * @lkup_exts: extension sequence to match
6645 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6647 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6648 enum ice_sw_tunnel_type tun_type, u32 priority)
6650 bool refresh_required = true;
6651 struct ice_sw_recipe *recp;
6654 /* Walk through existing recipes to find a match */
6655 recp = hw->switch_info->recp_list;
6656 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6657 /* If recipe was not created for this ID, in SW bookkeeping,
6658 * check if FW has an entry for this recipe. If the FW has an
6659 * entry update it in our SW bookkeeping and continue with the
6662 if (!recp[i].recp_created)
6663 if (ice_get_recp_frm_fw(hw,
6664 hw->switch_info->recp_list, i,
6668 /* Skip inverse action recipes */
6669 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6670 ICE_AQ_RECIPE_ACT_INV_ACT)
6673 /* if number of words we are looking for match */
6674 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6675 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6676 struct ice_fv_word *be = lkup_exts->fv_words;
6677 u16 *cr = recp[i].lkup_exts.field_mask;
6678 u16 *de = lkup_exts->field_mask;
6682 /* ar, cr, and qr are related to the recipe words, while
6683 * be, de, and pe are related to the lookup words
6685 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6686 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6688 if (ar[qr].off == be[pe].off &&
6689 ar[qr].prot_id == be[pe].prot_id &&
6691 /* Found the "pe"th word in the
6696 /* After walking through all the words in the
6697 * "i"th recipe if "p"th word was not found then
6698 * this recipe is not what we are looking for.
6699 * So break out from this loop and try the next
6702 if (qr >= recp[i].lkup_exts.n_val_words) {
6707 /* If for "i"th recipe the found was never set to false
6708 * then it means we found our match
6710 if (tun_type == recp[i].tun_type && found &&
6711 priority == recp[i].priority)
6712 return i; /* Return the recipe ID */
6715 return ICE_MAX_NUM_RECIPES;
6719 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6721 * As protocol id for outer vlan is different in dvm and svm, if dvm is
6722 * supported protocol array record for outer vlan has to be modified to
6723 * reflect the value proper for DVM.
6725 void ice_change_proto_id_to_dvm(void)
6729 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6730 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6731 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6732 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6736 * ice_prot_type_to_id - get protocol ID from protocol type
6737 * @type: protocol type
6738 * @id: pointer to variable that will receive the ID
6740 * Returns true if found, false otherwise
6742 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6746 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6747 if (ice_prot_id_tbl[i].type == type) {
6748 *id = ice_prot_id_tbl[i].protocol_id;
6755 * ice_fill_valid_words - count valid words
6756 * @rule: advanced rule with lookup information
6757 * @lkup_exts: byte offset extractions of the words that are valid
6759 * calculate valid words in a lookup rule using mask value
6762 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6763 struct ice_prot_lkup_ext *lkup_exts)
6765 u8 j, word, prot_id, ret_val;
6767 if (!ice_prot_type_to_id(rule->type, &prot_id))
6770 word = lkup_exts->n_val_words;
6772 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6773 if (((u16 *)&rule->m_u)[j] &&
6774 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6775 /* No more space to accommodate */
6776 if (word >= ICE_MAX_CHAIN_WORDS)
6778 lkup_exts->fv_words[word].off =
6779 ice_prot_ext[rule->type].offs[j];
6780 lkup_exts->fv_words[word].prot_id =
6781 ice_prot_id_tbl[rule->type].protocol_id;
6782 lkup_exts->field_mask[word] =
6783 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6787 ret_val = word - lkup_exts->n_val_words;
6788 lkup_exts->n_val_words = word;
6794 * ice_create_first_fit_recp_def - Create a recipe grouping
6795 * @hw: pointer to the hardware structure
6796 * @lkup_exts: an array of protocol header extractions
6797 * @rg_list: pointer to a list that stores new recipe groups
6798 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6800 * Using first fit algorithm, take all the words that are still not done
6801 * and start grouping them in 4-word groups. Each group makes up one
6804 static enum ice_status
6805 ice_create_first_fit_recp_def(struct ice_hw *hw,
6806 struct ice_prot_lkup_ext *lkup_exts,
6807 struct LIST_HEAD_TYPE *rg_list,
6810 struct ice_pref_recipe_group *grp = NULL;
6815 if (!lkup_exts->n_val_words) {
6816 struct ice_recp_grp_entry *entry;
6818 entry = (struct ice_recp_grp_entry *)
6819 ice_malloc(hw, sizeof(*entry));
6821 return ICE_ERR_NO_MEMORY;
6822 LIST_ADD(&entry->l_entry, rg_list);
6823 grp = &entry->r_group;
6825 grp->n_val_pairs = 0;
6828 /* Walk through every word in the rule to check if it is not done. If so
6829 * then this word needs to be part of a new recipe.
6831 for (j = 0; j < lkup_exts->n_val_words; j++)
6832 if (!ice_is_bit_set(lkup_exts->done, j)) {
6834 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6835 struct ice_recp_grp_entry *entry;
6837 entry = (struct ice_recp_grp_entry *)
6838 ice_malloc(hw, sizeof(*entry));
6840 return ICE_ERR_NO_MEMORY;
6841 LIST_ADD(&entry->l_entry, rg_list);
6842 grp = &entry->r_group;
6846 grp->pairs[grp->n_val_pairs].prot_id =
6847 lkup_exts->fv_words[j].prot_id;
6848 grp->pairs[grp->n_val_pairs].off =
6849 lkup_exts->fv_words[j].off;
6850 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6858 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6859 * @hw: pointer to the hardware structure
6860 * @fv_list: field vector with the extraction sequence information
6861 * @rg_list: recipe groupings with protocol-offset pairs
6863 * Helper function to fill in the field vector indices for protocol-offset
6864 * pairs. These indexes are then ultimately programmed into a recipe.
6866 static enum ice_status
6867 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6868 struct LIST_HEAD_TYPE *rg_list)
6870 struct ice_sw_fv_list_entry *fv;
6871 struct ice_recp_grp_entry *rg;
6872 struct ice_fv_word *fv_ext;
6874 if (LIST_EMPTY(fv_list))
6877 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6878 fv_ext = fv->fv_ptr->ew;
6880 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6883 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6884 struct ice_fv_word *pr;
6889 pr = &rg->r_group.pairs[i];
6890 mask = rg->r_group.mask[i];
6892 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6893 if (fv_ext[j].prot_id == pr->prot_id &&
6894 fv_ext[j].off == pr->off) {
6897 /* Store index of field vector */
6899 rg->fv_mask[i] = mask;
6903 /* Protocol/offset could not be found, caller gave an
6907 return ICE_ERR_PARAM;
6915 * ice_find_free_recp_res_idx - find free result indexes for recipe
6916 * @hw: pointer to hardware structure
6917 * @profiles: bitmap of profiles that will be associated with the new recipe
6918 * @free_idx: pointer to variable to receive the free index bitmap
6920 * The algorithm used here is:
6921 * 1. When creating a new recipe, create a set P which contains all
6922 * Profiles that will be associated with our new recipe
6924 * 2. For each Profile p in set P:
6925 * a. Add all recipes associated with Profile p into set R
6926 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6927 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6928 * i. Or just assume they all have the same possible indexes:
6930 * i.e., PossibleIndexes = 0x0000F00000000000
6932 * 3. For each Recipe r in set R:
6933 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6934 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6936 * FreeIndexes will contain the bits indicating the indexes free for use,
6937 * then the code needs to update the recipe[r].used_result_idx_bits to
6938 * indicate which indexes were selected for use by this recipe.
6941 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6942 ice_bitmap_t *free_idx)
6944 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6945 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6946 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6949 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6950 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6951 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6952 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6954 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6956 /* For each profile we are going to associate the recipe with, add the
6957 * recipes that are associated with that profile. This will give us
6958 * the set of recipes that our recipe may collide with. Also, determine
6959 * what possible result indexes are usable given this set of profiles.
6961 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6962 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6963 ICE_MAX_NUM_RECIPES);
6964 ice_and_bitmap(possible_idx, possible_idx,
6965 hw->switch_info->prof_res_bm[bit],
6969 /* For each recipe that our new recipe may collide with, determine
6970 * which indexes have been used.
6972 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6973 ice_or_bitmap(used_idx, used_idx,
6974 hw->switch_info->recp_list[bit].res_idxs,
6977 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6979 /* return number of free indexes */
6980 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6984 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6985 * @hw: pointer to hardware structure
6986 * @rm: recipe management list entry
6987 * @profiles: bitmap of profiles that will be associated.
6989 static enum ice_status
6990 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6991 ice_bitmap_t *profiles)
6993 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6994 struct ice_aqc_recipe_data_elem *tmp;
6995 struct ice_aqc_recipe_data_elem *buf;
6996 struct ice_recp_grp_entry *entry;
6997 enum ice_status status;
7003 /* When more than one recipe are required, another recipe is needed to
7004 * chain them together. Matching a tunnel metadata ID takes up one of
7005 * the match fields in the chaining recipe reducing the number of
7006 * chained recipes by one.
7008 /* check number of free result indices */
7009 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7010 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
7012 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
7013 free_res_idx, rm->n_grp_count);
7015 if (rm->n_grp_count > 1) {
7016 if (rm->n_grp_count > free_res_idx)
7017 return ICE_ERR_MAX_LIMIT;
7022 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7023 return ICE_ERR_MAX_LIMIT;
7025 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7026 ICE_MAX_NUM_RECIPES,
7029 return ICE_ERR_NO_MEMORY;
7031 buf = (struct ice_aqc_recipe_data_elem *)
7032 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7034 status = ICE_ERR_NO_MEMORY;
7038 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7039 recipe_count = ICE_MAX_NUM_RECIPES;
7040 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7042 if (status || recipe_count == 0)
7045 /* Allocate the recipe resources, and configure them according to the
7046 * match fields from protocol headers and extracted field vectors.
7048 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
7049 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7052 status = ice_alloc_recipe(hw, &entry->rid);
7056 /* Clear the result index of the located recipe, as this will be
7057 * updated, if needed, later in the recipe creation process.
7059 tmp[0].content.result_indx = 0;
7061 buf[recps] = tmp[0];
7062 buf[recps].recipe_indx = (u8)entry->rid;
7063 /* if the recipe is a non-root recipe RID should be programmed
7064 * as 0 for the rules to be applied correctly.
7066 buf[recps].content.rid = 0;
7067 ice_memset(&buf[recps].content.lkup_indx, 0,
7068 sizeof(buf[recps].content.lkup_indx),
7071 /* All recipes use look-up index 0 to match switch ID. */
7072 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7073 buf[recps].content.mask[0] =
7074 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7075 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7078 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7079 buf[recps].content.lkup_indx[i] = 0x80;
7080 buf[recps].content.mask[i] = 0;
7083 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7084 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7085 buf[recps].content.mask[i + 1] =
7086 CPU_TO_LE16(entry->fv_mask[i]);
7089 if (rm->n_grp_count > 1) {
7090 /* Checks to see if there really is a valid result index
7093 if (chain_idx >= ICE_MAX_FV_WORDS) {
7094 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7095 status = ICE_ERR_MAX_LIMIT;
7099 entry->chain_idx = chain_idx;
7100 buf[recps].content.result_indx =
7101 ICE_AQ_RECIPE_RESULT_EN |
7102 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7103 ICE_AQ_RECIPE_RESULT_DATA_M);
7104 ice_clear_bit(chain_idx, result_idx_bm);
7105 chain_idx = ice_find_first_bit(result_idx_bm,
7109 /* fill recipe dependencies */
7110 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7111 ICE_MAX_NUM_RECIPES);
7112 ice_set_bit(buf[recps].recipe_indx,
7113 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7114 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7118 if (rm->n_grp_count == 1) {
7119 rm->root_rid = buf[0].recipe_indx;
7120 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7121 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7122 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7123 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7124 sizeof(buf[0].recipe_bitmap),
7125 ICE_NONDMA_TO_NONDMA);
7127 status = ICE_ERR_BAD_PTR;
7130 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7131 * the recipe which is getting created if specified
7132 * by user. Usually any advanced switch filter, which results
7133 * into new extraction sequence, ended up creating a new recipe
7134 * of type ROOT and usually recipes are associated with profiles
7135 * Switch rule referreing newly created recipe, needs to have
7136 * either/or 'fwd' or 'join' priority, otherwise switch rule
7137 * evaluation will not happen correctly. In other words, if
7138 * switch rule to be evaluated on priority basis, then recipe
7139 * needs to have priority, otherwise it will be evaluated last.
7141 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7143 struct ice_recp_grp_entry *last_chain_entry;
7146 /* Allocate the last recipe that will chain the outcomes of the
7147 * other recipes together
7149 status = ice_alloc_recipe(hw, &rid);
7153 buf[recps].recipe_indx = (u8)rid;
7154 buf[recps].content.rid = (u8)rid;
7155 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7156 /* the new entry created should also be part of rg_list to
7157 * make sure we have complete recipe
7159 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7160 sizeof(*last_chain_entry));
7161 if (!last_chain_entry) {
7162 status = ICE_ERR_NO_MEMORY;
7165 last_chain_entry->rid = rid;
7166 ice_memset(&buf[recps].content.lkup_indx, 0,
7167 sizeof(buf[recps].content.lkup_indx),
7169 /* All recipes use look-up index 0 to match switch ID. */
7170 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7171 buf[recps].content.mask[0] =
7172 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7173 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7174 buf[recps].content.lkup_indx[i] =
7175 ICE_AQ_RECIPE_LKUP_IGNORE;
7176 buf[recps].content.mask[i] = 0;
7180 /* update r_bitmap with the recp that is used for chaining */
7181 ice_set_bit(rid, rm->r_bitmap);
7182 /* this is the recipe that chains all the other recipes so it
7183 * should not have a chaining ID to indicate the same
7185 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7186 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7188 last_chain_entry->fv_idx[i] = entry->chain_idx;
7189 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7190 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7191 ice_set_bit(entry->rid, rm->r_bitmap);
7193 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7194 if (sizeof(buf[recps].recipe_bitmap) >=
7195 sizeof(rm->r_bitmap)) {
7196 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7197 sizeof(buf[recps].recipe_bitmap),
7198 ICE_NONDMA_TO_NONDMA);
7200 status = ICE_ERR_BAD_PTR;
7203 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7206 rm->root_rid = (u8)rid;
7208 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7212 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7213 ice_release_change_lock(hw);
7217 /* Every recipe that just got created add it to the recipe
7220 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7221 struct ice_switch_info *sw = hw->switch_info;
7222 bool is_root, idx_found = false;
7223 struct ice_sw_recipe *recp;
7224 u16 idx, buf_idx = 0;
7226 /* find buffer index for copying some data */
7227 for (idx = 0; idx < rm->n_grp_count; idx++)
7228 if (buf[idx].recipe_indx == entry->rid) {
7234 status = ICE_ERR_OUT_OF_RANGE;
7238 recp = &sw->recp_list[entry->rid];
7239 is_root = (rm->root_rid == entry->rid);
7240 recp->is_root = is_root;
7242 recp->root_rid = entry->rid;
7243 recp->big_recp = (is_root && rm->n_grp_count > 1);
7245 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7246 entry->r_group.n_val_pairs *
7247 sizeof(struct ice_fv_word),
7248 ICE_NONDMA_TO_NONDMA);
7250 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7251 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7253 /* Copy non-result fv index values and masks to recipe. This
7254 * call will also update the result recipe bitmask.
7256 ice_collect_result_idx(&buf[buf_idx], recp);
7258 /* for non-root recipes, also copy to the root, this allows
7259 * easier matching of a complete chained recipe
7262 ice_collect_result_idx(&buf[buf_idx],
7263 &sw->recp_list[rm->root_rid]);
7265 recp->n_ext_words = entry->r_group.n_val_pairs;
7266 recp->chain_idx = entry->chain_idx;
7267 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7268 recp->n_grp_count = rm->n_grp_count;
7269 recp->tun_type = rm->tun_type;
7270 recp->recp_created = true;
7284 * ice_create_recipe_group - creates recipe group
7285 * @hw: pointer to hardware structure
7286 * @rm: recipe management list entry
7287 * @lkup_exts: lookup elements
7289 static enum ice_status
7290 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7291 struct ice_prot_lkup_ext *lkup_exts)
7293 enum ice_status status;
7296 rm->n_grp_count = 0;
7298 /* Create recipes for words that are marked not done by packing them
7301 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7302 &rm->rg_list, &recp_count);
7304 rm->n_grp_count += recp_count;
7305 rm->n_ext_words = lkup_exts->n_val_words;
7306 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7307 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7308 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7309 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7316 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7317 * @hw: pointer to hardware structure
7318 * @lkups: lookup elements or match criteria for the advanced recipe, one
7319 * structure per protocol header
7320 * @lkups_cnt: number of protocols
7321 * @bm: bitmap of field vectors to consider
7322 * @fv_list: pointer to a list that holds the returned field vectors
7324 static enum ice_status
7325 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7326 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7328 enum ice_status status;
7335 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7337 return ICE_ERR_NO_MEMORY;
7339 for (i = 0; i < lkups_cnt; i++)
7340 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7341 status = ICE_ERR_CFG;
7345 /* Find field vectors that include all specified protocol types */
7346 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7349 ice_free(hw, prot_ids);
7354 * ice_tun_type_match_word - determine if tun type needs a match mask
7355 * @tun_type: tunnel type
7356 * @mask: mask to be used for the tunnel
7358 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7361 case ICE_SW_TUN_VXLAN_GPE:
7362 case ICE_SW_TUN_GENEVE:
7363 case ICE_SW_TUN_VXLAN:
7364 case ICE_SW_TUN_NVGRE:
7365 case ICE_SW_TUN_UDP:
7366 case ICE_ALL_TUNNELS:
7367 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7368 case ICE_NON_TUN_QINQ:
7369 case ICE_SW_TUN_PPPOE_QINQ:
7370 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7371 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7372 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7373 *mask = ICE_TUN_FLAG_MASK;
7376 case ICE_SW_TUN_GENEVE_VLAN:
7377 case ICE_SW_TUN_VXLAN_VLAN:
7378 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7388 * ice_add_special_words - Add words that are not protocols, such as metadata
7389 * @rinfo: other information regarding the rule e.g. priority and action info
7390 * @lkup_exts: lookup word structure
7392 static enum ice_status
7393 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7394 struct ice_prot_lkup_ext *lkup_exts)
7398 /* If this is a tunneled packet, then add recipe index to match the
7399 * tunnel bit in the packet metadata flags.
7401 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7402 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7403 u8 word = lkup_exts->n_val_words++;
7405 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7406 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7407 lkup_exts->field_mask[word] = mask;
7409 return ICE_ERR_MAX_LIMIT;
7416 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7417 * @hw: pointer to hardware structure
7418 * @rinfo: other information regarding the rule e.g. priority and action info
7419 * @bm: pointer to memory for returning the bitmap of field vectors
7422 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7425 enum ice_prof_type prof_type;
7427 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7429 switch (rinfo->tun_type) {
7431 case ICE_NON_TUN_QINQ:
7432 prof_type = ICE_PROF_NON_TUN;
7434 case ICE_ALL_TUNNELS:
7435 prof_type = ICE_PROF_TUN_ALL;
7437 case ICE_SW_TUN_VXLAN_GPE:
7438 case ICE_SW_TUN_GENEVE:
7439 case ICE_SW_TUN_GENEVE_VLAN:
7440 case ICE_SW_TUN_VXLAN:
7441 case ICE_SW_TUN_VXLAN_VLAN:
7442 case ICE_SW_TUN_UDP:
7443 case ICE_SW_TUN_GTP:
7444 prof_type = ICE_PROF_TUN_UDP;
7446 case ICE_SW_TUN_NVGRE:
7447 prof_type = ICE_PROF_TUN_GRE;
7449 case ICE_SW_TUN_PPPOE:
7450 case ICE_SW_TUN_PPPOE_QINQ:
7451 prof_type = ICE_PROF_TUN_PPPOE;
7453 case ICE_SW_TUN_PPPOE_PAY:
7454 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7455 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7457 case ICE_SW_TUN_PPPOE_IPV4:
7458 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7459 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7460 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7461 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7463 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7464 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7466 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7467 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7469 case ICE_SW_TUN_PPPOE_IPV6:
7470 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7471 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7472 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7473 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7475 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7476 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7478 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7479 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7481 case ICE_SW_TUN_PROFID_IPV6_ESP:
7482 case ICE_SW_TUN_IPV6_ESP:
7483 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7485 case ICE_SW_TUN_PROFID_IPV6_AH:
7486 case ICE_SW_TUN_IPV6_AH:
7487 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7489 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7490 case ICE_SW_TUN_IPV6_L2TPV3:
7491 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7493 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7494 case ICE_SW_TUN_IPV6_NAT_T:
7495 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7497 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7498 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7500 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7501 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7503 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7504 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7506 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7507 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7509 case ICE_SW_TUN_IPV4_NAT_T:
7510 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7512 case ICE_SW_TUN_IPV4_L2TPV3:
7513 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7515 case ICE_SW_TUN_IPV4_ESP:
7516 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7518 case ICE_SW_TUN_IPV4_AH:
7519 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7521 case ICE_SW_IPV4_TCP:
7522 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7524 case ICE_SW_IPV4_UDP:
7525 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7527 case ICE_SW_IPV6_TCP:
7528 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7530 case ICE_SW_IPV6_UDP:
7531 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7533 case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7534 ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7536 case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7537 ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7539 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7540 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7541 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7542 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7544 case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7545 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7547 case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7548 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7550 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7551 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7552 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7553 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7555 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7556 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7558 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7559 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7561 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7562 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7563 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7564 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7566 case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7567 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7569 case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7570 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7572 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7573 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7574 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7575 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7577 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7578 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7580 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7581 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7583 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7584 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7585 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7586 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7588 case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7589 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7591 case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7592 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7594 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7595 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7596 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7597 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7599 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7600 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7602 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7603 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7605 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7606 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7607 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7608 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7610 case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7611 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7613 case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7614 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7616 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7617 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7618 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7619 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7621 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7622 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7624 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7625 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7627 case ICE_SW_TUN_AND_NON_TUN:
7628 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7630 prof_type = ICE_PROF_ALL;
7634 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7638 * ice_is_prof_rule - determine if rule type is a profile rule
7639 * @type: the rule type
7641 * if the rule type is a profile rule, that means that there no field value
7642 * match required, in this case just a profile hit is required.
7644 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7647 case ICE_SW_TUN_PROFID_IPV6_ESP:
7648 case ICE_SW_TUN_PROFID_IPV6_AH:
7649 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7650 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7651 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7652 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7653 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7654 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7664 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7665 * @hw: pointer to hardware structure
7666 * @lkups: lookup elements or match criteria for the advanced recipe, one
7667 * structure per protocol header
7668 * @lkups_cnt: number of protocols
7669 * @rinfo: other information regarding the rule e.g. priority and action info
7670 * @rid: return the recipe ID of the recipe created
7672 static enum ice_status
7673 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7674 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7676 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7677 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7678 struct ice_prot_lkup_ext *lkup_exts;
7679 struct ice_recp_grp_entry *r_entry;
7680 struct ice_sw_fv_list_entry *fvit;
7681 struct ice_recp_grp_entry *r_tmp;
7682 struct ice_sw_fv_list_entry *tmp;
7683 enum ice_status status = ICE_SUCCESS;
7684 struct ice_sw_recipe *rm;
7687 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7688 return ICE_ERR_PARAM;
7690 lkup_exts = (struct ice_prot_lkup_ext *)
7691 ice_malloc(hw, sizeof(*lkup_exts));
7693 return ICE_ERR_NO_MEMORY;
7695 /* Determine the number of words to be matched and if it exceeds a
7696 * recipe's restrictions
7698 for (i = 0; i < lkups_cnt; i++) {
7701 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7702 status = ICE_ERR_CFG;
7703 goto err_free_lkup_exts;
7706 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7708 status = ICE_ERR_CFG;
7709 goto err_free_lkup_exts;
7713 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7715 status = ICE_ERR_NO_MEMORY;
7716 goto err_free_lkup_exts;
7719 /* Get field vectors that contain fields extracted from all the protocol
7720 * headers being programmed.
7722 INIT_LIST_HEAD(&rm->fv_list);
7723 INIT_LIST_HEAD(&rm->rg_list);
7725 /* Get bitmap of field vectors (profiles) that are compatible with the
7726 * rule request; only these will be searched in the subsequent call to
7729 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7731 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7735 /* Create any special protocol/offset pairs, such as looking at tunnel
7736 * bits by extracting metadata
7738 status = ice_add_special_words(rinfo, lkup_exts);
7740 goto err_free_lkup_exts;
7742 /* Group match words into recipes using preferred recipe grouping
7745 status = ice_create_recipe_group(hw, rm, lkup_exts);
7749 /* set the recipe priority if specified */
7750 rm->priority = (u8)rinfo->priority;
7752 /* Find offsets from the field vector. Pick the first one for all the
7755 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7759 /* An empty FV list means to use all the profiles returned in the
7762 if (LIST_EMPTY(&rm->fv_list)) {
7765 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7766 struct ice_sw_fv_list_entry *fvl;
7768 fvl = (struct ice_sw_fv_list_entry *)
7769 ice_malloc(hw, sizeof(*fvl));
7773 fvl->profile_id = j;
7774 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7778 /* get bitmap of all profiles the recipe will be associated with */
7779 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7780 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7782 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7783 ice_set_bit((u16)fvit->profile_id, profiles);
7786 /* Look for a recipe which matches our requested fv / mask list */
7787 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7788 if (*rid < ICE_MAX_NUM_RECIPES)
7789 /* Success if found a recipe that match the existing criteria */
7792 rm->tun_type = rinfo->tun_type;
7793 /* Recipe we need does not exist, add a recipe */
7794 status = ice_add_sw_recipe(hw, rm, profiles);
7798 /* Associate all the recipes created with all the profiles in the
7799 * common field vector.
7801 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7803 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7806 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7807 (u8 *)r_bitmap, NULL);
7811 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7812 ICE_MAX_NUM_RECIPES);
7813 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7817 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7820 ice_release_change_lock(hw);
7825 /* Update profile to recipe bitmap array */
7826 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7827 ICE_MAX_NUM_RECIPES);
7829 /* Update recipe to profile bitmap array */
7830 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7831 ice_set_bit((u16)fvit->profile_id,
7832 recipe_to_profile[j]);
7835 *rid = rm->root_rid;
7836 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7837 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7839 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7840 ice_recp_grp_entry, l_entry) {
7841 LIST_DEL(&r_entry->l_entry);
7842 ice_free(hw, r_entry);
7845 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7847 LIST_DEL(&fvit->list_entry);
7852 ice_free(hw, rm->root_buf);
7857 ice_free(hw, lkup_exts);
7863 * ice_find_dummy_packet - find dummy packet by tunnel type
7865 * @lkups: lookup elements or match criteria for the advanced recipe, one
7866 * structure per protocol header
7867 * @lkups_cnt: number of protocols
7868 * @tun_type: tunnel type from the match criteria
7869 * @pkt: dummy packet to fill according to filter match criteria
7870 * @pkt_len: packet length of dummy packet
7871 * @offsets: pointer to receive the pointer to the offsets for the packet
7874 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7875 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7877 const struct ice_dummy_pkt_offsets **offsets)
7879 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7880 bool gre = false, mpls = false;
7883 for (i = 0; i < lkups_cnt; i++) {
7884 if (lkups[i].type == ICE_UDP_ILOS)
7886 else if (lkups[i].type == ICE_TCP_IL)
7888 else if (lkups[i].type == ICE_IPV6_OFOS)
7890 else if (lkups[i].type == ICE_VLAN_OFOS)
7892 else if (lkups[i].type == ICE_ETYPE_OL &&
7893 lkups[i].h_u.ethertype.ethtype_id ==
7894 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7895 lkups[i].m_u.ethertype.ethtype_id ==
7896 CPU_TO_BE16(0xFFFF))
7898 else if (lkups[i].type == ICE_IPV4_OFOS &&
7899 lkups[i].h_u.ipv4_hdr.protocol ==
7900 ICE_IPV4_NVGRE_PROTO_ID &&
7901 lkups[i].m_u.ipv4_hdr.protocol ==
7904 else if (lkups[i].type == ICE_PPPOE &&
7905 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7906 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7907 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7910 else if (lkups[i].type == ICE_IPV4_IL &&
7911 lkups[i].h_u.ipv4_hdr.protocol ==
7913 lkups[i].m_u.ipv4_hdr.protocol ==
7916 else if (lkups[i].type == ICE_ETYPE_OL &&
7917 lkups[i].h_u.ethertype.ethtype_id ==
7918 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
7919 lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
7923 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7924 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7925 *pkt = dummy_qinq_ipv6_pkt;
7926 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7927 *offsets = dummy_qinq_ipv6_packet_offsets;
7929 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7930 tun_type == ICE_NON_TUN_QINQ) {
7931 *pkt = dummy_qinq_ipv4_pkt;
7932 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7933 *offsets = dummy_qinq_ipv4_packet_offsets;
7937 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7938 *pkt = dummy_qinq_pppoe_ipv6_packet;
7939 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7940 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7942 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7943 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7944 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7945 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7947 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7948 *pkt = dummy_qinq_pppoe_ipv6_packet;
7949 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7950 *offsets = dummy_qinq_pppoe_packet_offsets;
7952 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7953 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7954 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7955 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7956 *offsets = dummy_qinq_pppoe_packet_offsets;
7960 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7961 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7962 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7963 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7965 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7966 *pkt = dummy_ipv6_gtp_packet;
7967 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7968 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7972 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7973 *pkt = dummy_ipv4_esp_pkt;
7974 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7975 *offsets = dummy_ipv4_esp_packet_offsets;
7979 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7980 *pkt = dummy_ipv6_esp_pkt;
7981 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7982 *offsets = dummy_ipv6_esp_packet_offsets;
7986 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7987 *pkt = dummy_ipv4_ah_pkt;
7988 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7989 *offsets = dummy_ipv4_ah_packet_offsets;
7993 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7994 *pkt = dummy_ipv6_ah_pkt;
7995 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7996 *offsets = dummy_ipv6_ah_packet_offsets;
8000 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
8001 *pkt = dummy_ipv4_nat_pkt;
8002 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
8003 *offsets = dummy_ipv4_nat_packet_offsets;
8007 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8008 *pkt = dummy_ipv6_nat_pkt;
8009 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8010 *offsets = dummy_ipv6_nat_packet_offsets;
8014 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8015 *pkt = dummy_ipv4_l2tpv3_pkt;
8016 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8017 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8021 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8022 *pkt = dummy_ipv6_l2tpv3_pkt;
8023 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8024 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8028 if (tun_type == ICE_SW_TUN_GTP) {
8029 *pkt = dummy_udp_gtp_packet;
8030 *pkt_len = sizeof(dummy_udp_gtp_packet);
8031 *offsets = dummy_udp_gtp_packet_offsets;
8035 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8036 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8037 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8038 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8039 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8043 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8044 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8045 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8046 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8047 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8051 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8052 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8053 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8054 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8055 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8059 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8060 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8061 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8062 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8063 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8067 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8068 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8069 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8070 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8071 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8075 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8076 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8077 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8078 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8079 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8083 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8084 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8085 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8086 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8087 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8091 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8092 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8093 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8094 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8095 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8099 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8100 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8101 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8102 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8103 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8107 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8108 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8109 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8110 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8111 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8115 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8116 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8117 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8118 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8119 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8123 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8124 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8125 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8126 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8127 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8131 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8132 *pkt = dummy_pppoe_ipv6_packet;
8133 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8134 *offsets = dummy_pppoe_packet_offsets;
8136 } else if (tun_type == ICE_SW_TUN_PPPOE ||
8137 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8138 *pkt = dummy_pppoe_ipv4_packet;
8139 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8140 *offsets = dummy_pppoe_packet_offsets;
8144 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8145 *pkt = dummy_pppoe_ipv4_packet;
8146 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8147 *offsets = dummy_pppoe_packet_ipv4_offsets;
8151 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8152 *pkt = dummy_pppoe_ipv4_tcp_packet;
8153 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8154 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8158 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8159 *pkt = dummy_pppoe_ipv4_udp_packet;
8160 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8161 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8165 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8166 *pkt = dummy_pppoe_ipv6_packet;
8167 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8168 *offsets = dummy_pppoe_packet_ipv6_offsets;
8172 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8173 *pkt = dummy_pppoe_ipv6_tcp_packet;
8174 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8175 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8179 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8180 *pkt = dummy_pppoe_ipv6_udp_packet;
8181 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8182 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8186 if (tun_type == ICE_SW_IPV4_TCP) {
8187 *pkt = dummy_tcp_packet;
8188 *pkt_len = sizeof(dummy_tcp_packet);
8189 *offsets = dummy_tcp_packet_offsets;
8193 if (tun_type == ICE_SW_IPV4_UDP) {
8194 *pkt = dummy_udp_packet;
8195 *pkt_len = sizeof(dummy_udp_packet);
8196 *offsets = dummy_udp_packet_offsets;
8200 if (tun_type == ICE_SW_IPV6_TCP) {
8201 *pkt = dummy_tcp_ipv6_packet;
8202 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8203 *offsets = dummy_tcp_ipv6_packet_offsets;
8207 if (tun_type == ICE_SW_IPV6_UDP) {
8208 *pkt = dummy_udp_ipv6_packet;
8209 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8210 *offsets = dummy_udp_ipv6_packet_offsets;
8214 if (tun_type == ICE_ALL_TUNNELS) {
8215 *pkt = dummy_gre_udp_packet;
8216 *pkt_len = sizeof(dummy_gre_udp_packet);
8217 *offsets = dummy_gre_udp_packet_offsets;
8221 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8223 *pkt = dummy_gre_tcp_packet;
8224 *pkt_len = sizeof(dummy_gre_tcp_packet);
8225 *offsets = dummy_gre_tcp_packet_offsets;
8229 *pkt = dummy_gre_udp_packet;
8230 *pkt_len = sizeof(dummy_gre_udp_packet);
8231 *offsets = dummy_gre_udp_packet_offsets;
8235 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8236 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8237 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8238 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8240 *pkt = dummy_udp_tun_tcp_packet;
8241 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8242 *offsets = dummy_udp_tun_tcp_packet_offsets;
8246 *pkt = dummy_udp_tun_udp_packet;
8247 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8248 *offsets = dummy_udp_tun_udp_packet_offsets;
8254 *pkt = dummy_vlan_udp_packet;
8255 *pkt_len = sizeof(dummy_vlan_udp_packet);
8256 *offsets = dummy_vlan_udp_packet_offsets;
8259 *pkt = dummy_udp_packet;
8260 *pkt_len = sizeof(dummy_udp_packet);
8261 *offsets = dummy_udp_packet_offsets;
8263 } else if (udp && ipv6) {
8265 *pkt = dummy_vlan_udp_ipv6_packet;
8266 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8267 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8270 *pkt = dummy_udp_ipv6_packet;
8271 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8272 *offsets = dummy_udp_ipv6_packet_offsets;
8274 } else if ((tcp && ipv6) || ipv6) {
8276 *pkt = dummy_vlan_tcp_ipv6_packet;
8277 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8278 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8281 *pkt = dummy_tcp_ipv6_packet;
8282 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8283 *offsets = dummy_tcp_ipv6_packet_offsets;
8288 *pkt = dummy_vlan_tcp_packet;
8289 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8290 *offsets = dummy_vlan_tcp_packet_offsets;
8292 *pkt = dummy_mpls_packet;
8293 *pkt_len = sizeof(dummy_mpls_packet);
8294 *offsets = dummy_mpls_packet_offsets;
8296 *pkt = dummy_tcp_packet;
8297 *pkt_len = sizeof(dummy_tcp_packet);
8298 *offsets = dummy_tcp_packet_offsets;
8303 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8305 * @lkups: lookup elements or match criteria for the advanced recipe, one
8306 * structure per protocol header
8307 * @lkups_cnt: number of protocols
8308 * @s_rule: stores rule information from the match criteria
8309 * @dummy_pkt: dummy packet to fill according to filter match criteria
8310 * @pkt_len: packet length of dummy packet
8311 * @offsets: offset info for the dummy packet
8313 static enum ice_status
8314 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8315 struct ice_aqc_sw_rules_elem *s_rule,
8316 const u8 *dummy_pkt, u16 pkt_len,
8317 const struct ice_dummy_pkt_offsets *offsets)
8322 /* Start with a packet with a pre-defined/dummy content. Then, fill
8323 * in the header values to be looked up or matched.
8325 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8327 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8329 for (i = 0; i < lkups_cnt; i++) {
8330 enum ice_protocol_type type;
8331 u16 offset = 0, len = 0, j;
8334 /* find the start of this layer; it should be found since this
8335 * was already checked when search for the dummy packet
8337 type = lkups[i].type;
8338 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8339 if (type == offsets[j].type) {
8340 offset = offsets[j].offset;
8345 /* this should never happen in a correct calling sequence */
8347 return ICE_ERR_PARAM;
8349 switch (lkups[i].type) {
8352 len = sizeof(struct ice_ether_hdr);
8355 len = sizeof(struct ice_ethtype_hdr);
8360 len = sizeof(struct ice_vlan_hdr);
8364 len = sizeof(struct ice_ipv4_hdr);
8368 len = sizeof(struct ice_ipv6_hdr);
8373 len = sizeof(struct ice_l4_hdr);
8376 len = sizeof(struct ice_sctp_hdr);
8379 len = sizeof(struct ice_nvgre);
8384 len = sizeof(struct ice_udp_tnl_hdr);
8388 case ICE_GTP_NO_PAY:
8389 len = sizeof(struct ice_udp_gtp_hdr);
8392 len = sizeof(struct ice_pppoe_hdr);
8395 len = sizeof(struct ice_esp_hdr);
8398 len = sizeof(struct ice_nat_t_hdr);
8401 len = sizeof(struct ice_ah_hdr);
8404 len = sizeof(struct ice_l2tpv3_sess_hdr);
8407 return ICE_ERR_PARAM;
8410 /* the length should be a word multiple */
8411 if (len % ICE_BYTES_PER_WORD)
8414 /* We have the offset to the header start, the length, the
8415 * caller's header values and mask. Use this information to
8416 * copy the data into the dummy packet appropriately based on
8417 * the mask. Note that we need to only write the bits as
8418 * indicated by the mask to make sure we don't improperly write
8419 * over any significant packet data.
8421 for (j = 0; j < len / sizeof(u16); j++)
8422 if (((u16 *)&lkups[i].m_u)[j])
8423 ((u16 *)(pkt + offset))[j] =
8424 (((u16 *)(pkt + offset))[j] &
8425 ~((u16 *)&lkups[i].m_u)[j]) |
8426 (((u16 *)&lkups[i].h_u)[j] &
8427 ((u16 *)&lkups[i].m_u)[j]);
8430 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8436 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8437 * @hw: pointer to the hardware structure
8438 * @tun_type: tunnel type
8439 * @pkt: dummy packet to fill in
8440 * @offsets: offset info for the dummy packet
8442 static enum ice_status
8443 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8444 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8449 case ICE_SW_TUN_AND_NON_TUN:
8450 case ICE_SW_TUN_VXLAN_GPE:
8451 case ICE_SW_TUN_VXLAN:
8452 case ICE_SW_TUN_VXLAN_VLAN:
8453 case ICE_SW_TUN_UDP:
8454 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8458 case ICE_SW_TUN_GENEVE:
8459 case ICE_SW_TUN_GENEVE_VLAN:
8460 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8465 /* Nothing needs to be done for this tunnel type */
8469 /* Find the outer UDP protocol header and insert the port number */
8470 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8471 if (offsets[i].type == ICE_UDP_OF) {
8472 struct ice_l4_hdr *hdr;
8475 offset = offsets[i].offset;
8476 hdr = (struct ice_l4_hdr *)&pkt[offset];
8477 hdr->dst_port = CPU_TO_BE16(open_port);
8487 * ice_find_adv_rule_entry - Search a rule entry
8488 * @hw: pointer to the hardware structure
8489 * @lkups: lookup elements or match criteria for the advanced recipe, one
8490 * structure per protocol header
8491 * @lkups_cnt: number of protocols
8492 * @recp_id: recipe ID for which we are finding the rule
8493 * @rinfo: other information regarding the rule e.g. priority and action info
8495 * Helper function to search for a given advance rule entry
8496 * Returns pointer to entry storing the rule if found
8498 static struct ice_adv_fltr_mgmt_list_entry *
8499 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8500 u16 lkups_cnt, u16 recp_id,
8501 struct ice_adv_rule_info *rinfo)
8503 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8504 struct ice_switch_info *sw = hw->switch_info;
8507 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8508 ice_adv_fltr_mgmt_list_entry, list_entry) {
8509 bool lkups_matched = true;
8511 if (lkups_cnt != list_itr->lkups_cnt)
8513 for (i = 0; i < list_itr->lkups_cnt; i++)
8514 if (memcmp(&list_itr->lkups[i], &lkups[i],
8516 lkups_matched = false;
8519 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8520 rinfo->tun_type == list_itr->rule_info.tun_type &&
8528 * ice_adv_add_update_vsi_list
8529 * @hw: pointer to the hardware structure
8530 * @m_entry: pointer to current adv filter management list entry
8531 * @cur_fltr: filter information from the book keeping entry
8532 * @new_fltr: filter information with the new VSI to be added
8534 * Call AQ command to add or update previously created VSI list with new VSI.
8536 * Helper function to do book keeping associated with adding filter information
8537 * The algorithm to do the booking keeping is described below :
8538 * When a VSI needs to subscribe to a given advanced filter
8539 * if only one VSI has been added till now
8540 * Allocate a new VSI list and add two VSIs
8541 * to this list using switch rule command
8542 * Update the previously created switch rule with the
8543 * newly created VSI list ID
8544 * if a VSI list was previously created
8545 * Add the new VSI to the previously created VSI list set
8546 * using the update switch rule command
8548 static enum ice_status
8549 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8550 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8551 struct ice_adv_rule_info *cur_fltr,
8552 struct ice_adv_rule_info *new_fltr)
8554 enum ice_status status;
8555 u16 vsi_list_id = 0;
8557 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8558 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8559 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8560 return ICE_ERR_NOT_IMPL;
8562 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8563 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8564 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8565 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8566 return ICE_ERR_NOT_IMPL;
8568 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8569 /* Only one entry existed in the mapping and it was not already
8570 * a part of a VSI list. So, create a VSI list with the old and
8573 struct ice_fltr_info tmp_fltr;
8574 u16 vsi_handle_arr[2];
8576 /* A rule already exists with the new VSI being added */
8577 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8578 new_fltr->sw_act.fwd_id.hw_vsi_id)
8579 return ICE_ERR_ALREADY_EXISTS;
8581 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8582 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8583 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8589 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8590 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8591 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8592 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8593 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8594 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8596 /* Update the previous switch rule of "forward to VSI" to
8599 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8603 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8604 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8605 m_entry->vsi_list_info =
8606 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8609 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8611 if (!m_entry->vsi_list_info)
8614 /* A rule already exists with the new VSI being added */
8615 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8618 /* Update the previously created VSI list set with
8619 * the new VSI ID passed in
8621 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8623 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8625 ice_aqc_opc_update_sw_rules,
8627 /* update VSI list mapping info with new VSI ID */
8629 ice_set_bit(vsi_handle,
8630 m_entry->vsi_list_info->vsi_map);
8633 m_entry->vsi_count++;
8638 * ice_add_adv_rule - helper function to create an advanced switch rule
8639 * @hw: pointer to the hardware structure
8640 * @lkups: information on the words that needs to be looked up. All words
8641 * together makes one recipe
8642 * @lkups_cnt: num of entries in the lkups array
8643 * @rinfo: other information related to the rule that needs to be programmed
8644 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8645 * ignored is case of error.
8647 * This function can program only 1 rule at a time. The lkups is used to
8648 * describe the all the words that forms the "lookup" portion of the recipe.
8649 * These words can span multiple protocols. Callers to this function need to
8650 * pass in a list of protocol headers with lookup information along and mask
8651 * that determines which words are valid from the given protocol header.
8652 * rinfo describes other information related to this rule such as forwarding
8653 * IDs, priority of this rule, etc.
8656 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8657 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8658 struct ice_rule_query_data *added_entry)
8660 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8661 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8662 const struct ice_dummy_pkt_offsets *pkt_offsets;
8663 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8664 struct LIST_HEAD_TYPE *rule_head;
8665 struct ice_switch_info *sw;
8666 enum ice_status status;
8667 const u8 *pkt = NULL;
8673 /* Initialize profile to result index bitmap */
8674 if (!hw->switch_info->prof_res_bm_init) {
8675 hw->switch_info->prof_res_bm_init = 1;
8676 ice_init_prof_result_bm(hw);
8679 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8680 if (!prof_rule && !lkups_cnt)
8681 return ICE_ERR_PARAM;
8683 /* get # of words we need to match */
8685 for (i = 0; i < lkups_cnt; i++) {
8688 ptr = (u16 *)&lkups[i].m_u;
8689 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8695 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8696 return ICE_ERR_PARAM;
8698 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8699 return ICE_ERR_PARAM;
8702 /* make sure that we can locate a dummy packet */
8703 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8706 status = ICE_ERR_PARAM;
8707 goto err_ice_add_adv_rule;
8710 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8711 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8712 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8713 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8716 vsi_handle = rinfo->sw_act.vsi_handle;
8717 if (!ice_is_vsi_valid(hw, vsi_handle))
8718 return ICE_ERR_PARAM;
8720 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8721 rinfo->sw_act.fwd_id.hw_vsi_id =
8722 ice_get_hw_vsi_num(hw, vsi_handle);
8723 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8724 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8726 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8729 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8731 /* we have to add VSI to VSI_LIST and increment vsi_count.
8732 * Also Update VSI list so that we can change forwarding rule
8733 * if the rule already exists, we will check if it exists with
8734 * same vsi_id, if not then add it to the VSI list if it already
8735 * exists if not then create a VSI list and add the existing VSI
8736 * ID and the new VSI ID to the list
8737 * We will add that VSI to the list
8739 status = ice_adv_add_update_vsi_list(hw, m_entry,
8740 &m_entry->rule_info,
8743 added_entry->rid = rid;
8744 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8745 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8749 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8750 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8752 return ICE_ERR_NO_MEMORY;
8753 act |= ICE_SINGLE_ACT_LAN_ENABLE;
8754 switch (rinfo->sw_act.fltr_act) {
8755 case ICE_FWD_TO_VSI:
8756 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8757 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8758 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8761 act |= ICE_SINGLE_ACT_TO_Q;
8762 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8763 ICE_SINGLE_ACT_Q_INDEX_M;
8765 case ICE_FWD_TO_QGRP:
8766 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8767 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8768 act |= ICE_SINGLE_ACT_TO_Q;
8769 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8770 ICE_SINGLE_ACT_Q_INDEX_M;
8771 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8772 ICE_SINGLE_ACT_Q_REGION_M;
8774 case ICE_DROP_PACKET:
8775 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8776 ICE_SINGLE_ACT_VALID_BIT;
8779 status = ICE_ERR_CFG;
8780 goto err_ice_add_adv_rule;
8783 /* set the rule LOOKUP type based on caller specified 'RX'
8784 * instead of hardcoding it to be either LOOKUP_TX/RX
8786 * for 'RX' set the source to be the port number
8787 * for 'TX' set the source to be the source HW VSI number (determined
8791 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8792 s_rule->pdata.lkup_tx_rx.src =
8793 CPU_TO_LE16(hw->port_info->lport);
8795 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8796 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8799 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8800 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8802 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8803 pkt_len, pkt_offsets);
8805 goto err_ice_add_adv_rule;
8807 if (rinfo->tun_type != ICE_NON_TUN &&
8808 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8809 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8810 s_rule->pdata.lkup_tx_rx.hdr,
8813 goto err_ice_add_adv_rule;
8816 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8817 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8820 goto err_ice_add_adv_rule;
8821 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8822 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8824 status = ICE_ERR_NO_MEMORY;
8825 goto err_ice_add_adv_rule;
8828 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8829 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8830 ICE_NONDMA_TO_NONDMA);
8831 if (!adv_fltr->lkups && !prof_rule) {
8832 status = ICE_ERR_NO_MEMORY;
8833 goto err_ice_add_adv_rule;
8836 adv_fltr->lkups_cnt = lkups_cnt;
8837 adv_fltr->rule_info = *rinfo;
8838 adv_fltr->rule_info.fltr_rule_id =
8839 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8840 sw = hw->switch_info;
8841 sw->recp_list[rid].adv_rule = true;
8842 rule_head = &sw->recp_list[rid].filt_rules;
8844 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8845 adv_fltr->vsi_count = 1;
8847 /* Add rule entry to book keeping list */
8848 LIST_ADD(&adv_fltr->list_entry, rule_head);
8850 added_entry->rid = rid;
8851 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8852 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8854 err_ice_add_adv_rule:
8855 if (status && adv_fltr) {
8856 ice_free(hw, adv_fltr->lkups);
8857 ice_free(hw, adv_fltr);
8860 ice_free(hw, s_rule);
8866 * ice_adv_rem_update_vsi_list
8867 * @hw: pointer to the hardware structure
8868 * @vsi_handle: VSI handle of the VSI to remove
8869 * @fm_list: filter management entry for which the VSI list management needs to
8872 static enum ice_status
8873 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8874 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8876 struct ice_vsi_list_map_info *vsi_list_info;
8877 enum ice_sw_lkup_type lkup_type;
8878 enum ice_status status;
8881 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8882 fm_list->vsi_count == 0)
8883 return ICE_ERR_PARAM;
8885 /* A rule with the VSI being removed does not exist */
8886 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8887 return ICE_ERR_DOES_NOT_EXIST;
8889 lkup_type = ICE_SW_LKUP_LAST;
8890 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8891 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8892 ice_aqc_opc_update_sw_rules,
8897 fm_list->vsi_count--;
8898 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8899 vsi_list_info = fm_list->vsi_list_info;
8900 if (fm_list->vsi_count == 1) {
8901 struct ice_fltr_info tmp_fltr;
8904 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8906 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8907 return ICE_ERR_OUT_OF_RANGE;
8909 /* Make sure VSI list is empty before removing it below */
8910 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8912 ice_aqc_opc_update_sw_rules,
8917 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8918 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8919 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8920 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8921 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8922 tmp_fltr.fwd_id.hw_vsi_id =
8923 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8924 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8925 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8926 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8928 /* Update the previous switch rule of "MAC forward to VSI" to
8929 * "MAC fwd to VSI list"
8931 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8933 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8934 tmp_fltr.fwd_id.hw_vsi_id, status);
8937 fm_list->vsi_list_info->ref_cnt--;
8939 /* Remove the VSI list since it is no longer used */
8940 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8942 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8943 vsi_list_id, status);
8947 LIST_DEL(&vsi_list_info->list_entry);
8948 ice_free(hw, vsi_list_info);
8949 fm_list->vsi_list_info = NULL;
8956 * ice_rem_adv_rule - removes existing advanced switch rule
8957 * @hw: pointer to the hardware structure
8958 * @lkups: information on the words that needs to be looked up. All words
8959 * together makes one recipe
8960 * @lkups_cnt: num of entries in the lkups array
8961 * @rinfo: Its the pointer to the rule information for the rule
8963 * This function can be used to remove 1 rule at a time. The lkups is
8964 * used to describe all the words that forms the "lookup" portion of the
8965 * rule. These words can span multiple protocols. Callers to this function
8966 * need to pass in a list of protocol headers with lookup information along
8967 * and mask that determines which words are valid from the given protocol
8968 * header. rinfo describes other information related to this rule such as
8969 * forwarding IDs, priority of this rule, etc.
8972 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8973 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8975 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8976 struct ice_prot_lkup_ext lkup_exts;
8977 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8978 enum ice_status status = ICE_SUCCESS;
8979 bool remove_rule = false;
8980 u16 i, rid, vsi_handle;
8982 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8983 for (i = 0; i < lkups_cnt; i++) {
8986 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8989 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8994 /* Create any special protocol/offset pairs, such as looking at tunnel
8995 * bits by extracting metadata
8997 status = ice_add_special_words(rinfo, &lkup_exts);
9001 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
9002 /* If did not find a recipe that match the existing criteria */
9003 if (rid == ICE_MAX_NUM_RECIPES)
9004 return ICE_ERR_PARAM;
9006 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9007 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9008 /* the rule is already removed */
9011 ice_acquire_lock(rule_lock);
9012 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9014 } else if (list_elem->vsi_count > 1) {
9015 remove_rule = false;
9016 vsi_handle = rinfo->sw_act.vsi_handle;
9017 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9019 vsi_handle = rinfo->sw_act.vsi_handle;
9020 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9022 ice_release_lock(rule_lock);
9025 if (list_elem->vsi_count == 0)
9028 ice_release_lock(rule_lock);
9030 struct ice_aqc_sw_rules_elem *s_rule;
9033 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9034 s_rule = (struct ice_aqc_sw_rules_elem *)
9035 ice_malloc(hw, rule_buf_sz);
9037 return ICE_ERR_NO_MEMORY;
9038 s_rule->pdata.lkup_tx_rx.act = 0;
9039 s_rule->pdata.lkup_tx_rx.index =
9040 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9041 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9042 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9044 ice_aqc_opc_remove_sw_rules, NULL);
9045 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9046 struct ice_switch_info *sw = hw->switch_info;
9048 ice_acquire_lock(rule_lock);
9049 LIST_DEL(&list_elem->list_entry);
9050 ice_free(hw, list_elem->lkups);
9051 ice_free(hw, list_elem);
9052 ice_release_lock(rule_lock);
9053 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9054 sw->recp_list[rid].adv_rule = false;
9056 ice_free(hw, s_rule);
9062 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9063 * @hw: pointer to the hardware structure
9064 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9066 * This function is used to remove 1 rule at a time. The removal is based on
9067 * the remove_entry parameter. This function will remove rule for a given
9068 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9071 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9072 struct ice_rule_query_data *remove_entry)
9074 struct ice_adv_fltr_mgmt_list_entry *list_itr;
9075 struct LIST_HEAD_TYPE *list_head;
9076 struct ice_adv_rule_info rinfo;
9077 struct ice_switch_info *sw;
9079 sw = hw->switch_info;
9080 if (!sw->recp_list[remove_entry->rid].recp_created)
9081 return ICE_ERR_PARAM;
9082 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9083 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9085 if (list_itr->rule_info.fltr_rule_id ==
9086 remove_entry->rule_id) {
9087 rinfo = list_itr->rule_info;
9088 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9089 return ice_rem_adv_rule(hw, list_itr->lkups,
9090 list_itr->lkups_cnt, &rinfo);
9093 /* either list is empty or unable to find rule */
9094 return ICE_ERR_DOES_NOT_EXIST;
9098 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9100 * @hw: pointer to the hardware structure
9101 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9103 * This function is used to remove all the rules for a given VSI and as soon
9104 * as removing a rule fails, it will return immediately with the error code,
9105 * else it will return ICE_SUCCESS
9107 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9109 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9110 struct ice_vsi_list_map_info *map_info;
9111 struct LIST_HEAD_TYPE *list_head;
9112 struct ice_adv_rule_info rinfo;
9113 struct ice_switch_info *sw;
9114 enum ice_status status;
9117 sw = hw->switch_info;
9118 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9119 if (!sw->recp_list[rid].recp_created)
9121 if (!sw->recp_list[rid].adv_rule)
9124 list_head = &sw->recp_list[rid].filt_rules;
9125 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9126 ice_adv_fltr_mgmt_list_entry,
9128 rinfo = list_itr->rule_info;
9130 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9131 map_info = list_itr->vsi_list_info;
9135 if (!ice_is_bit_set(map_info->vsi_map,
9138 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9142 rinfo.sw_act.vsi_handle = vsi_handle;
9143 status = ice_rem_adv_rule(hw, list_itr->lkups,
9144 list_itr->lkups_cnt, &rinfo);
9154 * ice_replay_fltr - Replay all the filters stored by a specific list head
9155 * @hw: pointer to the hardware structure
9156 * @list_head: list for which filters needs to be replayed
9157 * @recp_id: Recipe ID for which rules need to be replayed
9159 static enum ice_status
9160 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9162 struct ice_fltr_mgmt_list_entry *itr;
9163 enum ice_status status = ICE_SUCCESS;
9164 struct ice_sw_recipe *recp_list;
9165 u8 lport = hw->port_info->lport;
9166 struct LIST_HEAD_TYPE l_head;
9168 if (LIST_EMPTY(list_head))
9171 recp_list = &hw->switch_info->recp_list[recp_id];
9172 /* Move entries from the given list_head to a temporary l_head so that
9173 * they can be replayed. Otherwise when trying to re-add the same
9174 * filter, the function will return already exists
9176 LIST_REPLACE_INIT(list_head, &l_head);
9178 /* Mark the given list_head empty by reinitializing it so filters
9179 * could be added again by *handler
9181 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9183 struct ice_fltr_list_entry f_entry;
9186 f_entry.fltr_info = itr->fltr_info;
9187 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9188 status = ice_add_rule_internal(hw, recp_list, lport,
9190 if (status != ICE_SUCCESS)
9195 /* Add a filter per VSI separately */
9196 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9198 if (!ice_is_vsi_valid(hw, vsi_handle))
9201 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9202 f_entry.fltr_info.vsi_handle = vsi_handle;
9203 f_entry.fltr_info.fwd_id.hw_vsi_id =
9204 ice_get_hw_vsi_num(hw, vsi_handle);
9205 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9206 if (recp_id == ICE_SW_LKUP_VLAN)
9207 status = ice_add_vlan_internal(hw, recp_list,
9210 status = ice_add_rule_internal(hw, recp_list,
9213 if (status != ICE_SUCCESS)
9218 /* Clear the filter management list */
9219 ice_rem_sw_rule_info(hw, &l_head);
9224 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9225 * @hw: pointer to the hardware structure
9227 * NOTE: This function does not clean up partially added filters on error.
9228 * It is up to caller of the function to issue a reset or fail early.
9230 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9232 struct ice_switch_info *sw = hw->switch_info;
9233 enum ice_status status = ICE_SUCCESS;
9236 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9237 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9239 status = ice_replay_fltr(hw, i, head);
9240 if (status != ICE_SUCCESS)
9247 * ice_replay_vsi_fltr - Replay filters for requested VSI
9248 * @hw: pointer to the hardware structure
9249 * @pi: pointer to port information structure
9250 * @sw: pointer to switch info struct for which function replays filters
9251 * @vsi_handle: driver VSI handle
9252 * @recp_id: Recipe ID for which rules need to be replayed
9253 * @list_head: list for which filters need to be replayed
9255 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9256 * It is required to pass valid VSI handle.
9258 static enum ice_status
9259 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9260 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9261 struct LIST_HEAD_TYPE *list_head)
9263 struct ice_fltr_mgmt_list_entry *itr;
9264 enum ice_status status = ICE_SUCCESS;
9265 struct ice_sw_recipe *recp_list;
9268 if (LIST_EMPTY(list_head))
9270 recp_list = &sw->recp_list[recp_id];
9271 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9273 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9275 struct ice_fltr_list_entry f_entry;
9277 f_entry.fltr_info = itr->fltr_info;
9278 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9279 itr->fltr_info.vsi_handle == vsi_handle) {
9280 /* update the src in case it is VSI num */
9281 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9282 f_entry.fltr_info.src = hw_vsi_id;
9283 status = ice_add_rule_internal(hw, recp_list,
9286 if (status != ICE_SUCCESS)
9290 if (!itr->vsi_list_info ||
9291 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9293 /* Clearing it so that the logic can add it back */
9294 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9295 f_entry.fltr_info.vsi_handle = vsi_handle;
9296 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9297 /* update the src in case it is VSI num */
9298 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9299 f_entry.fltr_info.src = hw_vsi_id;
9300 if (recp_id == ICE_SW_LKUP_VLAN)
9301 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9303 status = ice_add_rule_internal(hw, recp_list,
9306 if (status != ICE_SUCCESS)
9314 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9315 * @hw: pointer to the hardware structure
9316 * @vsi_handle: driver VSI handle
9317 * @list_head: list for which filters need to be replayed
9319 * Replay the advanced rule for the given VSI.
9321 static enum ice_status
9322 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9323 struct LIST_HEAD_TYPE *list_head)
9325 struct ice_rule_query_data added_entry = { 0 };
9326 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9327 enum ice_status status = ICE_SUCCESS;
9329 if (LIST_EMPTY(list_head))
9331 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9333 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9334 u16 lk_cnt = adv_fltr->lkups_cnt;
9336 if (vsi_handle != rinfo->sw_act.vsi_handle)
9338 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9347 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9348 * @hw: pointer to the hardware structure
9349 * @pi: pointer to port information structure
9350 * @vsi_handle: driver VSI handle
9352 * Replays filters for requested VSI via vsi_handle.
9355 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9358 struct ice_switch_info *sw = hw->switch_info;
9359 enum ice_status status;
9362 /* Update the recipes that were created */
9363 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9364 struct LIST_HEAD_TYPE *head;
9366 head = &sw->recp_list[i].filt_replay_rules;
9367 if (!sw->recp_list[i].adv_rule)
9368 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9371 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9372 if (status != ICE_SUCCESS)
9380 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9381 * @hw: pointer to the HW struct
9382 * @sw: pointer to switch info struct for which function removes filters
9384 * Deletes the filter replay rules for given switch
9386 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9393 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9394 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9395 struct LIST_HEAD_TYPE *l_head;
9397 l_head = &sw->recp_list[i].filt_replay_rules;
9398 if (!sw->recp_list[i].adv_rule)
9399 ice_rem_sw_rule_info(hw, l_head);
9401 ice_rem_adv_rule_info(hw, l_head);
9407 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9408 * @hw: pointer to the HW struct
9410 * Deletes the filter replay rules.
9412 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9414 ice_rm_sw_replay_rule_info(hw, hw->switch_info);