1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
17 #define ICE_GTPU_PROFILE 24
18 #define ICE_ETH_P_8021Q 0x8100
20 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
21 * struct to configure any switch filter rules.
22 * {DA (6 bytes), SA(6 bytes),
23 * Ether type (2 bytes for header without VLAN tag) OR
24 * VLAN tag (4 bytes for header with VLAN tag) }
26 * Word on Hardcoded values
27 * byte 0 = 0x2: to identify it as locally administered DA MAC
28 * byte 6 = 0x2: to identify it as locally administered SA MAC
29 * byte 12 = 0x81 & byte 13 = 0x00:
30 * In case of VLAN filter first two bytes defines ether type (0x8100)
31 * and remaining two bytes are placeholder for programming a given VLAN ID
32 * In case of Ether type filter it is treated as header without VLAN tag
33 * and byte 12 and 13 is used to program a given Ether type instead
35 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
39 struct ice_dummy_pkt_offsets {
40 enum ice_protocol_type type;
41 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
44 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
47 { ICE_IPV4_OFOS, 14 },
52 { ICE_PROTOCOL_LAST, 0 },
55 static const u8 dummy_gre_tcp_packet[] = {
56 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
57 0x00, 0x00, 0x00, 0x00,
58 0x00, 0x00, 0x00, 0x00,
60 0x08, 0x00, /* ICE_ETYPE_OL 12 */
62 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x2F, 0x00, 0x00,
65 0x00, 0x00, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00,
68 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
69 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
72 0x00, 0x00, 0x00, 0x00,
73 0x00, 0x00, 0x00, 0x00,
76 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x06, 0x00, 0x00,
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x50, 0x02, 0x20, 0x00,
86 0x00, 0x00, 0x00, 0x00
89 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
92 { ICE_IPV4_OFOS, 14 },
97 { ICE_PROTOCOL_LAST, 0 },
100 static const u8 dummy_gre_udp_packet[] = {
101 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
105 0x08, 0x00, /* ICE_ETYPE_OL 12 */
107 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x2F, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
111 0x00, 0x00, 0x00, 0x00,
113 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
114 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
117 0x00, 0x00, 0x00, 0x00,
118 0x00, 0x00, 0x00, 0x00,
121 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x11, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
128 0x00, 0x08, 0x00, 0x00,
131 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
133 { ICE_ETYPE_OL, 12 },
134 { ICE_IPV4_OFOS, 14 },
138 { ICE_VXLAN_GPE, 42 },
142 { ICE_PROTOCOL_LAST, 0 },
145 static const u8 dummy_udp_tun_tcp_packet[] = {
146 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
147 0x00, 0x00, 0x00, 0x00,
148 0x00, 0x00, 0x00, 0x00,
150 0x08, 0x00, /* ICE_ETYPE_OL 12 */
152 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
153 0x00, 0x01, 0x00, 0x00,
154 0x40, 0x11, 0x00, 0x00,
155 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
158 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
159 0x00, 0x46, 0x00, 0x00,
161 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
162 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
165 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
169 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
170 0x00, 0x01, 0x00, 0x00,
171 0x40, 0x06, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x50, 0x02, 0x20, 0x00,
179 0x00, 0x00, 0x00, 0x00
182 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
184 { ICE_ETYPE_OL, 12 },
185 { ICE_IPV4_OFOS, 14 },
189 { ICE_VXLAN_GPE, 42 },
192 { ICE_UDP_ILOS, 84 },
193 { ICE_PROTOCOL_LAST, 0 },
196 static const u8 dummy_udp_tun_udp_packet[] = {
197 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
201 0x08, 0x00, /* ICE_ETYPE_OL 12 */
203 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
204 0x00, 0x01, 0x00, 0x00,
205 0x00, 0x11, 0x00, 0x00,
206 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
209 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
210 0x00, 0x3a, 0x00, 0x00,
212 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
213 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
216 0x00, 0x00, 0x00, 0x00,
217 0x00, 0x00, 0x00, 0x00,
220 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
221 0x00, 0x01, 0x00, 0x00,
222 0x00, 0x11, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
226 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
227 0x00, 0x08, 0x00, 0x00,
230 /* offset info for MAC + IPv4 + UDP dummy packet */
231 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
233 { ICE_ETYPE_OL, 12 },
234 { ICE_IPV4_OFOS, 14 },
235 { ICE_UDP_ILOS, 34 },
236 { ICE_PROTOCOL_LAST, 0 },
239 /* Dummy packet for MAC + IPv4 + UDP */
240 static const u8 dummy_udp_packet[] = {
241 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
242 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00,
245 0x08, 0x00, /* ICE_ETYPE_OL 12 */
247 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
248 0x00, 0x01, 0x00, 0x00,
249 0x00, 0x11, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00,
253 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
254 0x00, 0x08, 0x00, 0x00,
256 0x00, 0x00, /* 2 bytes for 4 byte alignment */
259 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
260 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
262 { ICE_ETYPE_OL, 12 },
263 { ICE_VLAN_OFOS, 14 },
264 { ICE_IPV4_OFOS, 18 },
265 { ICE_UDP_ILOS, 38 },
266 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
270 static const u8 dummy_vlan_udp_packet[] = {
271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
272 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00,
275 0x81, 0x00, /* ICE_ETYPE_OL 12 */
277 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
279 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
280 0x00, 0x01, 0x00, 0x00,
281 0x00, 0x11, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
286 0x00, 0x08, 0x00, 0x00,
288 0x00, 0x00, /* 2 bytes for 4 byte alignment */
291 /* offset info for MAC + IPv4 + TCP dummy packet */
292 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
294 { ICE_ETYPE_OL, 12 },
295 { ICE_IPV4_OFOS, 14 },
297 { ICE_PROTOCOL_LAST, 0 },
300 /* Dummy packet for MAC + IPv4 + TCP */
301 static const u8 dummy_tcp_packet[] = {
302 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
303 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00,
306 0x08, 0x00, /* ICE_ETYPE_OL 12 */
308 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
309 0x00, 0x01, 0x00, 0x00,
310 0x00, 0x06, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
315 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
317 0x50, 0x00, 0x00, 0x00,
318 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, /* 2 bytes for 4 byte alignment */
323 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
324 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
326 { ICE_ETYPE_OL, 12 },
327 { ICE_VLAN_OFOS, 14 },
328 { ICE_IPV4_OFOS, 18 },
330 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
334 static const u8 dummy_vlan_tcp_packet[] = {
335 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
336 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
339 0x81, 0x00, /* ICE_ETYPE_OL 12 */
341 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
343 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
344 0x00, 0x01, 0x00, 0x00,
345 0x00, 0x06, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
350 0x00, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
352 0x50, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, /* 2 bytes for 4 byte alignment */
358 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
360 { ICE_ETYPE_OL, 12 },
361 { ICE_IPV6_OFOS, 14 },
363 { ICE_PROTOCOL_LAST, 0 },
366 static const u8 dummy_tcp_ipv6_packet[] = {
367 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
373 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
374 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
385 0x00, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
387 0x50, 0x00, 0x00, 0x00,
388 0x00, 0x00, 0x00, 0x00,
390 0x00, 0x00, /* 2 bytes for 4 byte alignment */
393 /* C-tag (802.1Q): IPv6 + TCP */
394 static const struct ice_dummy_pkt_offsets
395 dummy_vlan_tcp_ipv6_packet_offsets[] = {
397 { ICE_ETYPE_OL, 12 },
398 { ICE_VLAN_OFOS, 14 },
399 { ICE_IPV6_OFOS, 18 },
401 { ICE_PROTOCOL_LAST, 0 },
404 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
405 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
406 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
407 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x00, 0x00,
410 0x81, 0x00, /* ICE_ETYPE_OL 12 */
412 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
414 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
415 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
422 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
428 0x50, 0x00, 0x00, 0x00,
429 0x00, 0x00, 0x00, 0x00,
431 0x00, 0x00, /* 2 bytes for 4 byte alignment */
435 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
437 { ICE_ETYPE_OL, 12 },
438 { ICE_IPV6_OFOS, 14 },
439 { ICE_UDP_ILOS, 54 },
440 { ICE_PROTOCOL_LAST, 0 },
443 /* IPv6 + UDP dummy packet */
/* Dummy packet for MAC + IPv6 + UDP (matches dummy_udp_ipv6_packet_offsets) */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
471 /* C-tag (802.1Q): IPv6 + UDP */
472 static const struct ice_dummy_pkt_offsets
473 dummy_vlan_udp_ipv6_packet_offsets[] = {
475 { ICE_ETYPE_OL, 12 },
476 { ICE_VLAN_OFOS, 14 },
477 { ICE_IPV6_OFOS, 18 },
478 { ICE_UDP_ILOS, 58 },
479 { ICE_PROTOCOL_LAST, 0 },
482 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
483 static const u8 dummy_vlan_udp_ipv6_packet[] = {
484 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
485 0x00, 0x00, 0x00, 0x00,
486 0x00, 0x00, 0x00, 0x00,
488 0x81, 0x00, /* ICE_ETYPE_OL 12 */
490 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
492 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
493 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00,
503 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
504 0x00, 0x08, 0x00, 0x00,
506 0x00, 0x00, /* 2 bytes for 4 byte alignment */
509 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
510 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
512 { ICE_IPV4_OFOS, 14 },
517 { ICE_PROTOCOL_LAST, 0 },
520 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
521 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
526 0x45, 0x00, 0x00, 0x58, /* IP 14 */
527 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x11, 0x00, 0x00,
529 0x00, 0x00, 0x00, 0x00,
530 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
533 0x00, 0x44, 0x00, 0x00,
535 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
536 0x00, 0x00, 0x00, 0x00,
537 0x00, 0x00, 0x00, 0x85,
539 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
540 0x00, 0x00, 0x00, 0x00,
542 0x45, 0x00, 0x00, 0x28, /* IP 62 */
543 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x06, 0x00, 0x00,
545 0x00, 0x00, 0x00, 0x00,
546 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
551 0x50, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x00, 0x00,
554 0x00, 0x00, /* 2 bytes for 4 byte alignment */
557 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
558 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
560 { ICE_IPV4_OFOS, 14 },
564 { ICE_UDP_ILOS, 82 },
565 { ICE_PROTOCOL_LAST, 0 },
568 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
569 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
574 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
575 0x00, 0x00, 0x00, 0x00,
576 0x00, 0x11, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
581 0x00, 0x38, 0x00, 0x00,
583 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x85,
587 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
588 0x00, 0x00, 0x00, 0x00,
590 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
591 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x11, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, 0x00, 0x00,
596 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
597 0x00, 0x08, 0x00, 0x00,
599 0x00, 0x00, /* 2 bytes for 4 byte alignment */
602 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
603 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
605 { ICE_IPV4_OFOS, 14 },
610 { ICE_PROTOCOL_LAST, 0 },
613 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
614 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
619 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x11, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
625 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
626 0x00, 0x58, 0x00, 0x00,
628 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
629 0x00, 0x00, 0x00, 0x00,
630 0x00, 0x00, 0x00, 0x85,
632 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
633 0x00, 0x00, 0x00, 0x00,
635 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
636 0x00, 0x14, 0x06, 0x00,
637 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00,
639 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00,
641 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x50, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x00,
652 0x00, 0x00, /* 2 bytes for 4 byte alignment */
655 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
657 { ICE_IPV4_OFOS, 14 },
661 { ICE_UDP_ILOS, 102 },
662 { ICE_PROTOCOL_LAST, 0 },
665 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
666 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
667 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00,
671 0x45, 0x00, 0x00, 0x60, /* IP 14 */
672 0x00, 0x00, 0x00, 0x00,
673 0x00, 0x11, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00,
675 0x00, 0x00, 0x00, 0x00,
677 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
678 0x00, 0x4c, 0x00, 0x00,
680 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x85,
684 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
685 0x00, 0x00, 0x00, 0x00,
687 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
688 0x00, 0x08, 0x11, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
699 0x00, 0x08, 0x00, 0x00,
701 0x00, 0x00, /* 2 bytes for 4 byte alignment */
704 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
706 { ICE_IPV6_OFOS, 14 },
711 { ICE_PROTOCOL_LAST, 0 },
714 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
715 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
716 0x00, 0x00, 0x00, 0x00,
717 0x00, 0x00, 0x00, 0x00,
720 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
721 0x00, 0x44, 0x11, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
727 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00,
729 0x00, 0x00, 0x00, 0x00,
731 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
732 0x00, 0x44, 0x00, 0x00,
734 0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
735 0x00, 0x00, 0x00, 0x00,
736 0x00, 0x00, 0x00, 0x85,
738 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
739 0x00, 0x00, 0x00, 0x00,
741 0x45, 0x00, 0x00, 0x28, /* IP 82 */
742 0x00, 0x00, 0x00, 0x00,
743 0x00, 0x06, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00,
745 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
750 0x50, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
753 0x00, 0x00, /* 2 bytes for 4 byte alignment */
756 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
758 { ICE_IPV6_OFOS, 14 },
762 { ICE_UDP_ILOS, 102 },
763 { ICE_PROTOCOL_LAST, 0 },
766 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
767 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
772 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
773 0x00, 0x38, 0x11, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
777 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00,
781 0x00, 0x00, 0x00, 0x00,
783 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
784 0x00, 0x38, 0x00, 0x00,
786 0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
787 0x00, 0x00, 0x00, 0x00,
788 0x00, 0x00, 0x00, 0x85,
790 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
791 0x00, 0x00, 0x00, 0x00,
793 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
794 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x11, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
797 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
800 0x00, 0x08, 0x00, 0x00,
802 0x00, 0x00, /* 2 bytes for 4 byte alignment */
805 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
807 { ICE_IPV6_OFOS, 14 },
812 { ICE_PROTOCOL_LAST, 0 },
815 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
816 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
821 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
822 0x00, 0x58, 0x11, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00,
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, 0x00, 0x00,
830 0x00, 0x00, 0x00, 0x00,
832 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
833 0x00, 0x58, 0x00, 0x00,
835 0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
836 0x00, 0x00, 0x00, 0x00,
837 0x00, 0x00, 0x00, 0x85,
839 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
840 0x00, 0x00, 0x00, 0x00,
842 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
843 0x00, 0x14, 0x06, 0x00,
844 0x00, 0x00, 0x00, 0x00,
845 0x00, 0x00, 0x00, 0x00,
846 0x00, 0x00, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
854 0x00, 0x00, 0x00, 0x00,
855 0x00, 0x00, 0x00, 0x00,
856 0x50, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x00, 0x00,
859 0x00, 0x00, /* 2 bytes for 4 byte alignment */
862 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
864 { ICE_IPV6_OFOS, 14 },
868 { ICE_UDP_ILOS, 122 },
869 { ICE_PROTOCOL_LAST, 0 },
872 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
873 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
878 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
879 0x00, 0x4c, 0x11, 0x00,
880 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
889 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
890 0x00, 0x4c, 0x00, 0x00,
892 0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
893 0x00, 0x00, 0x00, 0x00,
894 0x00, 0x00, 0x00, 0x85,
896 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
897 0x00, 0x00, 0x00, 0x00,
899 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
900 0x00, 0x08, 0x11, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
908 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
911 0x00, 0x08, 0x00, 0x00,
913 0x00, 0x00, /* 2 bytes for 4 byte alignment */
916 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
918 { ICE_IPV4_OFOS, 14 },
922 { ICE_PROTOCOL_LAST, 0 },
925 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
926 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
927 0x00, 0x00, 0x00, 0x00,
928 0x00, 0x00, 0x00, 0x00,
931 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
932 0x00, 0x00, 0x40, 0x00,
933 0x40, 0x11, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
937 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
938 0x00, 0x00, 0x00, 0x00,
940 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
941 0x00, 0x00, 0x00, 0x00,
942 0x00, 0x00, 0x00, 0x85,
944 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
945 0x00, 0x00, 0x00, 0x00,
947 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
948 0x00, 0x00, 0x40, 0x00,
949 0x40, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
956 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
958 { ICE_IPV4_OFOS, 14 },
962 { ICE_PROTOCOL_LAST, 0 },
965 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
966 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
971 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
972 0x00, 0x00, 0x40, 0x00,
973 0x40, 0x11, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
977 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
978 0x00, 0x00, 0x00, 0x00,
980 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
981 0x00, 0x00, 0x00, 0x00,
982 0x00, 0x00, 0x00, 0x85,
984 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
985 0x00, 0x00, 0x00, 0x00,
987 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
988 0x00, 0x00, 0x3b, 0x00,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
1002 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1003 { ICE_MAC_OFOS, 0 },
1004 { ICE_IPV6_OFOS, 14 },
1007 { ICE_IPV4_IL, 82 },
1008 { ICE_PROTOCOL_LAST, 0 },
1011 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1029 0x00, 0x00, 0x00, 0x00,
1031 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x85,
1035 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1036 0x00, 0x00, 0x00, 0x00,
1038 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1039 0x00, 0x00, 0x40, 0x00,
1040 0x40, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00,
1048 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1049 { ICE_MAC_OFOS, 0 },
1050 { ICE_IPV6_OFOS, 14 },
1053 { ICE_IPV6_IL, 82 },
1054 { ICE_PROTOCOL_LAST, 0 },
/* Outer IPv6 + Outer UDP + GTP-U (with PDU session ext. header) + Inner IPv6 */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
	0x00, 0x00, 0x3b, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
1098 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1099 { ICE_MAC_OFOS, 0 },
1100 { ICE_IPV4_OFOS, 14 },
1103 { ICE_PROTOCOL_LAST, 0 },
1106 static const u8 dummy_udp_gtp_packet[] = {
1107 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1108 0x00, 0x00, 0x00, 0x00,
1109 0x00, 0x00, 0x00, 0x00,
1112 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1113 0x00, 0x00, 0x00, 0x00,
1114 0x00, 0x11, 0x00, 0x00,
1115 0x00, 0x00, 0x00, 0x00,
1116 0x00, 0x00, 0x00, 0x00,
1118 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1119 0x00, 0x1c, 0x00, 0x00,
1121 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1122 0x00, 0x00, 0x00, 0x00,
1123 0x00, 0x00, 0x00, 0x85,
1125 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1126 0x00, 0x00, 0x00, 0x00,
1130 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1131 { ICE_MAC_OFOS, 0 },
1132 { ICE_IPV4_OFOS, 14 },
1134 { ICE_GTP_NO_PAY, 42 },
1135 { ICE_PROTOCOL_LAST, 0 },
1139 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1140 { ICE_MAC_OFOS, 0 },
1141 { ICE_IPV6_OFOS, 14 },
1143 { ICE_GTP_NO_PAY, 62 },
1144 { ICE_PROTOCOL_LAST, 0 },
1147 static const u8 dummy_ipv6_gtp_packet[] = {
1148 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1149 0x00, 0x00, 0x00, 0x00,
1150 0x00, 0x00, 0x00, 0x00,
1153 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1154 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1155 0x00, 0x00, 0x00, 0x00,
1156 0x00, 0x00, 0x00, 0x00,
1157 0x00, 0x00, 0x00, 0x00,
1158 0x00, 0x00, 0x00, 0x00,
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1161 0x00, 0x00, 0x00, 0x00,
1162 0x00, 0x00, 0x00, 0x00,
1164 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1165 0x00, 0x00, 0x00, 0x00,
1167 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1168 0x00, 0x00, 0x00, 0x00,
1173 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1174 { ICE_MAC_OFOS, 0 },
1175 { ICE_ETYPE_OL, 12 },
1176 { ICE_VLAN_OFOS, 14},
1178 { ICE_PROTOCOL_LAST, 0 },
1181 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1182 { ICE_MAC_OFOS, 0 },
1183 { ICE_ETYPE_OL, 12 },
1184 { ICE_VLAN_OFOS, 14},
1186 { ICE_IPV4_OFOS, 26 },
1187 { ICE_PROTOCOL_LAST, 0 },
1190 static const u8 dummy_pppoe_ipv4_packet[] = {
1191 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1195 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1197 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1199 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1202 0x00, 0x21, /* PPP Link Layer 24 */
1204 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1205 0x00, 0x00, 0x00, 0x00,
1206 0x00, 0x00, 0x00, 0x00,
1207 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1214 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1215 { ICE_MAC_OFOS, 0 },
1216 { ICE_ETYPE_OL, 12 },
1217 { ICE_VLAN_OFOS, 14},
1219 { ICE_IPV4_OFOS, 26 },
1221 { ICE_PROTOCOL_LAST, 0 },
1224 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1225 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1226 0x00, 0x00, 0x00, 0x00,
1227 0x00, 0x00, 0x00, 0x00,
1229 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1231 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1233 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1236 0x00, 0x21, /* PPP Link Layer 24 */
1238 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1239 0x00, 0x01, 0x00, 0x00,
1240 0x00, 0x06, 0x00, 0x00,
1241 0x00, 0x00, 0x00, 0x00,
1242 0x00, 0x00, 0x00, 0x00,
1244 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1245 0x00, 0x00, 0x00, 0x00,
1246 0x00, 0x00, 0x00, 0x00,
1247 0x50, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1254 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1255 { ICE_MAC_OFOS, 0 },
1256 { ICE_ETYPE_OL, 12 },
1257 { ICE_VLAN_OFOS, 14},
1259 { ICE_IPV4_OFOS, 26 },
1260 { ICE_UDP_ILOS, 46 },
1261 { ICE_PROTOCOL_LAST, 0 },
1264 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1266 0x00, 0x00, 0x00, 0x00,
1267 0x00, 0x00, 0x00, 0x00,
1269 0x81, 0x00, /* ICE_ETYPE_OL 12 */
1271 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
1273 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1276 0x00, 0x21, /* PPP Link Layer 24 */
1278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1279 0x00, 0x01, 0x00, 0x00,
1280 0x00, 0x11, 0x00, 0x00,
1281 0x00, 0x00, 0x00, 0x00,
1282 0x00, 0x00, 0x00, 0x00,
1284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1285 0x00, 0x08, 0x00, 0x00,
1287 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1290 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1291 { ICE_MAC_OFOS, 0 },
1292 { ICE_ETYPE_OL, 12 },
1293 { ICE_VLAN_OFOS, 14},
1295 { ICE_IPV6_OFOS, 26 },
1296 { ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, /* ICE_ETYPE_OL 12 (0x8100 = single VLAN tag) */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 (ver 1, type 1, code 0) */
	0x00, 0x57, /* PPP Link Layer 24 (0x0057 = IPv6, see ICE_PPP_IPV6_PROTO_ID) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00, /* payload len 0, next header 0x3b (no next header) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = { /* layout of dummy_pppoe_ipv6_tcp_packet below */
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, /* ICE_ETYPE_OL 12 (0x8100 = single VLAN tag) */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 (ver 1, type 1, code 0) */
	0x00, 0x57, /* PPP Link Layer 24 (0x0057 = IPv6) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP (payload len 0x14 = 20-byte TCP hdr) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 (20-byte header), no flags */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = { /* layout of dummy_pppoe_ipv6_udp_packet below */
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_IPV6_OFOS, 26 },
	{ ICE_UDP_ILOS, 66 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x81, 0x00, /* ICE_ETYPE_OL 12 (0x8100 = single VLAN tag) */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 (0x8864 = PPPoE session) */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 (ver 1, type 1, code 0) */
	0x00, 0x57, /* PPP Link Layer 24 (0x0057 = IPv6) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP (0x11), payload len 8 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv4_esp_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 (total len 28 = 20 IP + 8 ESP) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* TTL 64, protocol 0x32 (50 = ESP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 (SPI + sequence number) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv6_esp_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP (0x32), payload len 8 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 (SPI + sequence number) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv4_ah_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (total len 32 = 20 IP + 12 AH) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* TTL 64, protocol 0x33 (51 = AH) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv6_ah_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH (0x33), payload len 12 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv4_nat_pkt below (IPsec NAT traversal over UDP) */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol 0x11 (UDP) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 (UDP dst port 0x1194 = 4500, NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv6_nat_pkt below (IPsec NAT traversal over UDP) */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T (0x11 = UDP), payload len 16 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 (UDP dst port 0x1194 = 4500, NAT-T) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv4_l2tpv3_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 (total len 32) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* TTL 64, protocol 0x73 (115 = L2TPv3) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_ipv6_l2tpv3_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40, /* payload len 12, next header 0x73 (L2TPv3), hop limit 64 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* offset list for dummy_qinq_ipv4_pkt below (double-tagged 802.1ad-style frame) */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_EX, 14 },
	{ ICE_VLAN_IN, 18 },
	{ ICE_IPV4_OFOS, 22 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_qinq_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00, /* ICE_ETYPE_OL 12 (0x9100 = outer QinQ tag) */
	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 (outer TCI, inner etype 0x8100) */
	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_IN 18 (inner TCI, etype 0x0800 = IPv4) */
	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset list for dummy_qinq_ipv6_pkt below (double-tagged frame, IPv6 payload) */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_EX, 14 },
	{ ICE_VLAN_IN, 18 },
	{ ICE_IPV6_OFOS, 22 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_qinq_ipv6_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00, /* ICE_ETYPE_OL 12 (0x9100 = outer QinQ tag) */
	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 (outer TCI, inner etype 0x8100) */
	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_IN 18 (inner TCI, etype 0x86DD = IPv6) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP (0x11), payload len 16 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x10, 0x00, 0x00, /* UDP length 16 */
	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset list for the double-tagged PPPoE dummy packets below */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_EX, 14 },
	{ ICE_VLAN_IN, 18 },
	{ ICE_PROTOCOL_LAST, 0 },
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = { /* layout of dummy_qinq_pppoe_ipv4_pkt below */
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_EX, 14 },
	{ ICE_VLAN_IN, 18 },
	{ ICE_IPV4_OFOS, 30 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00, /* ICE_ETYPE_OL 12 (0x9100 = outer QinQ tag) */
	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 (outer TCI, inner etype 0x8100) */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 (inner TCI, 0x8864 = PPPoE session) */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 (ver 1, type 1, code 0) */
	0x00, 0x21, /* PPP Link Layer 28 (0x0021 = IPv4) */
	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 (total len 20 = IP header only) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 byte alignment */
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = { /* layout of dummy_qinq_pppoe_ipv6_packet below */
	{ ICE_MAC_OFOS, 0 },
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_IN, 18 },
	{ ICE_IPV6_OFOS, 30 },
	{ ICE_PROTOCOL_LAST, 0 },
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x91, 0x00, /* ICE_ETYPE_OL 12 (0x9100 = outer QinQ tag) */
	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 (outer TCI, inner etype 0x8100) */
	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_IN 18 (inner TCI, 0x8864 = PPPoE session) */
	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 (ver 1, type 1, code 0) */
	0x00, 0x57, /* PPP Link Layer 28 (0x0057 = IPv6) */
	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
	0x00, 0x00, 0x3b, 0x00, /* payload len 0, next header 0x3b (no next header) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap: indexed by recipe ID,
 * each entry is the set of profiles mapped to that recipe
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);
/* this is a profile to recipe association bitmap (inverse of the above) */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
/* forward declaration: defined later in this file, needed by ice_get_recp_frm_fw() */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1804 * ice_collect_result_idx - copy result index values
1805 * @buf: buffer that contains the result index
1806 * @recp: the recipe struct to copy data into
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
				   struct ice_sw_recipe *recp)
	/* record the result index (low bits of result_indx) in recp->res_idxs,
	 * but only when the result-enable flag is set for this recipe entry
	 */
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		ice_set_bit(buf->content.result_indx &
			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/* GTP-U profile ID -> switch tunnel type map, scanned linearly by
 * ice_get_tun_type_for_recipe() to refine a generic ICE_SW_TUN_GTP result.
 * NOTE(review): only reads of this table are visible here — it could likely
 * be declared const; confirm there are no writers elsewhere in the file.
 */
static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV4},
	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_IPV6},
	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV4},
	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_IPV6},
	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
1844 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1845 * @rid: recipe ID that we are populating
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
	/* hard-coded HW profile ID groups used to classify the recipe */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type;
	u16 i, j, k, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;
	/* walk every profile bound to this recipe and flag which family
	 * (GRE/VXLAN/PPPoE/non-tunnel/GTP/flag-based) it belongs to
	 */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))
		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)
		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)
		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)
		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;
		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
		if ((j >= ICE_PROFID_IPV4_ESP &&
		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		     j <= ICE_PROFID_IPV6_GTPU_TEID))
	if (!non_tun_valid && vxlan_valid) /* tunnel-only profile sets */
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN; /* mixed tunnel + non-tunnel */
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		tun_type = ICE_NON_TUN;
		tun_type = ICE_NON_TUN;
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) { /* disambiguate PPPoE IPv4 vs IPv6 */
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
			tun_type = ICE_SW_TUN_PPPOE_IPV6;
	if (tun_type == ICE_SW_TUN_GTP) { /* refine generic GTP via ice_prof_type_tbl */
		for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
			if (ice_is_bit_set(recipe_to_profile[rid],
					   ice_prof_type_tbl[k].prof_id)) {
				tun_type = ice_prof_type_tbl[k].type;
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) { /* single-profile recipes map 1:1 */
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
				case ICE_PROFID_IPV4_PFCP_NODE:
						ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				case ICE_PROFID_IPV6_PFCP_NODE:
						ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				case ICE_PROFID_IPV4_PFCP_SESSION:
						ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				case ICE_PROFID_IPV6_PFCP_SESSION:
						ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
				case ICE_PROFID_IPV4_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
				case ICE_PROFID_IPV6_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
	if (vlan && tun_type == ICE_SW_TUN_PPPOE) /* promote to QinQ variants when outer VLAN present */
		tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan && tun_type == ICE_NON_TUN)
		tun_type = ICE_NON_TUN_QINQ;
2045 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2046 * @hw: pointer to hardware structure
2047 * @recps: struct that we need to populate
2048 * @rid: recipe ID that we are populating
2049 * @refresh_required: true if we should get recipe to profile mapping from FW
2051 * This function is used to populate all the necessary entries into our
2052 * bookkeeping so that we have a current list of all the recipes that are
2053 * programmed in the firmware.
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;
	tmp[0].recipe_indx = rid; /* FW returns the chain starting at this recipe */
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 */
	lkup_exts = &recps[rid].lkup_exts;
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;
		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; /* index 0 is reserved */
			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			if (prot == ICE_META_DATA_ID_HW &&
			    off == ICE_TUN_FLAG_MDID_OFF) /* tunnel-flag metadata word */
		/* populate rg_list with the data from the child entry of this
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1); /* chained recipe spans multiple HW entries */
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)
	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
2208 * ice_get_recp_to_prof_map - updates recipe to profile mapping
2209 * @hw: pointer to hardware structure
2211 * This function is used to populate recipe_to_profile matrix where index to
2212 * this array is the recipe ID and the element is the mapping of which profiles
2213 * is this recipe mapped to.
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* query FW per profile and rebuild both direction maps */
	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			ice_set_bit(i, recipe_to_profile[j]); /* maintain the inverse map too */
2235 * ice_init_def_sw_recp - initialize the recipe book keeping tables
2236 * @hw: pointer to the HW struct
2237 * @recp_list: pointer to sw recipe list
2239 * Allocate memory for the entire recipe table and initialize the structures/
2240 * entries corresponding to basic recipes.
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;
	/* initialize per-recipe lists and lock; root_rid defaults to its index */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
2267 * ice_aq_get_sw_cfg - get switch configuration
2268 * @hw: pointer to the hardware structure
2269 * @buf: pointer to the result buffer
2270 * @buf_size: length of the buffer available for response
2271 * @req_desc: pointer to requested descriptor
2272 * @num_elems: pointer to number of elements
2273 * @cd: pointer to command details structure or NULL
2275 * Get switch configuration (0x0200) to be placed in buf.
2276 * This admin command returns information such as initial VSI/port number
2277 * and switch ID it belongs to.
2279 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
2281 * to 0. If the response from f/w has *req_desc set to 0, all the switch
2282 * configuration information has been returned; if non-zero (meaning not all
2283 * the information was returned), the caller should call this function again
2284 * with *req_desc set to the previous value returned by f/w to get the
2285 * next block of switch configuration information.
2287 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function is expected to use *num_elems while
2289 * parsing the response buffer.
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc); /* continuation token from previous call (0 on first) */
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
		/* pass back FW's continuation token and element count */
		*req_desc = LE16_TO_CPU(cmd->element);
		*num_elems = LE16_TO_CPU(cmd->num_elems);
2314 * ice_alloc_rss_global_lut - allocate a RSS global LUT
2315 * @hw: pointer to the HW struct
2316 * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2317 * @global_lut_id: output parameter for the RSS global LUT's ID
enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, 1); /* single-element request */
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
				       ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
			  shared_res ? "shared" : "dedicated", status);
		goto ice_alloc_global_lut_exit;
	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); /* FW-assigned LUT ID */
ice_alloc_global_lut_exit:
	ice_free(hw, sw_buf);
2350 * ice_free_rss_global_lut - free a RSS global LUT
2351 * @hw: pointer to the HW struct
2352 * @global_lut_id: ID of the RSS global LUT to free
enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len, num_elems = 1;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, num_elems);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(num_elems);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id); /* ID being released */
	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
			  global_lut_id, status);
	ice_free(hw, sw_buf);
2379 * ice_alloc_sw - allocate resources specific to switch
2380 * @hw: pointer to the HW struct
2381 * @ena_stats: true to turn on VEB stats
2382 * @shared_res: true for shared resource, false for dedicated resource
2383 * @sw_id: switch ID returned
2384 * @counter_id: VEB counter ID returned
2386 * allocates switch resources (SWID and VEB counter) (0x0208)
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;
	buf_len = ice_struct_size(sw_buf, elem, 1);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp); /* FW-assigned switch ID */
		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;
		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;
		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp); /* FW-assigned VEB counter */
		ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
2461 * ice_free_sw - free resources specific to switch
2462 * @hw: pointer to the HW struct
2463 * @sw_id: switch ID returned
2464 * @counter_id: VEB counter ID returned
2466 * free switch resources (SWID and VEB counter) (0x0209)
2468 * NOTE: This function frees multiple resources. It continues
2469 * releasing other resources even after it encounters error.
2470 * The error code returned is the last error it encountered.
2472 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2474 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2475 enum ice_status status, ret_status;
2478 buf_len = ice_struct_size(sw_buf, elem, 1);
2479 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2481 return ICE_ERR_NO_MEMORY;
2483 /* Prepare buffer to free for switch ID res.
2484 * The number of resource entries in buffer is passed as 1 since only a
2485 * single switch/VEB instance is freed, and hence a single sw_id
2488 sw_buf->num_elems = CPU_TO_LE16(1);
2489 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2490 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2492 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2493 ice_aqc_opc_free_res, NULL);
2496 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2498 /* Prepare buffer to free for VEB Counter resource */
2499 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2500 ice_malloc(hw, buf_len);
2502 ice_free(hw, sw_buf);
2503 return ICE_ERR_NO_MEMORY;
2506 /* The number of resource entries in buffer is passed as 1 since only a
2507 * single switch/VEB instance is freed, and hence a single VEB counter
2510 counter_buf->num_elems = CPU_TO_LE16(1);
2511 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2512 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2514 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2515 ice_aqc_opc_free_res, NULL);
2517 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2518 ret_status = status;
2521 ice_free(hw, counter_buf);
2522 ice_free(hw, sw_buf);
2528 * @hw: pointer to the HW struct
2529 * @vsi_ctx: pointer to a VSI context struct
2530 * @cd: pointer to command details structure or NULL
2532 * Add a VSI context to the hardware (0x0210)
2535 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2536 struct ice_sq_cd *cd)
2538 struct ice_aqc_add_update_free_vsi_resp *res;
2539 struct ice_aqc_add_get_update_free_vsi *cmd;
2540 struct ice_aq_desc desc;
2541 enum ice_status status;
2543 cmd = &desc.params.vsi_cmd;
2544 res = &desc.params.add_update_free_vsi_res;
2546 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2548 if (!vsi_ctx->alloc_from_pool)
2549 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2550 ICE_AQ_VSI_IS_VALID);
2552 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2554 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2556 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2557 sizeof(vsi_ctx->info), cd);
2560 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2561 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2562 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2570 * @hw: pointer to the HW struct
2571 * @vsi_ctx: pointer to a VSI context struct
2572 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2573 * @cd: pointer to command details structure or NULL
2575 * Free VSI context info from hardware (0x0213)
2578 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2579 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2581 struct ice_aqc_add_update_free_vsi_resp *resp;
2582 struct ice_aqc_add_get_update_free_vsi *cmd;
2583 struct ice_aq_desc desc;
2584 enum ice_status status;
2586 cmd = &desc.params.vsi_cmd;
2587 resp = &desc.params.add_update_free_vsi_res;
2589 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2591 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2593 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2595 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2597 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2598 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2606 * @hw: pointer to the HW struct
2607 * @vsi_ctx: pointer to a VSI context struct
2608 * @cd: pointer to command details structure or NULL
2610 * Update VSI context in the hardware (0x0211)
2613 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2614 struct ice_sq_cd *cd)
2616 struct ice_aqc_add_update_free_vsi_resp *resp;
2617 struct ice_aqc_add_get_update_free_vsi *cmd;
2618 struct ice_aq_desc desc;
2619 enum ice_status status;
2621 cmd = &desc.params.vsi_cmd;
2622 resp = &desc.params.add_update_free_vsi_res;
2624 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2626 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2628 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2630 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2631 sizeof(vsi_ctx->info), cd);
2634 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2635 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2642 * ice_is_vsi_valid - check whether the VSI is valid or not
2643 * @hw: pointer to the HW struct
2644 * @vsi_handle: VSI handle
2646 * check whether the VSI is valid or not
2648 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2650 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2654 * ice_get_hw_vsi_num - return the HW VSI number
2655 * @hw: pointer to the HW struct
2656 * @vsi_handle: VSI handle
2658 * return the HW VSI number
2659 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2661 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2663 return hw->vsi_ctx[vsi_handle]->vsi_num;
2667 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2668 * @hw: pointer to the HW struct
2669 * @vsi_handle: VSI handle
2671 * return the VSI context entry for a given VSI handle
2673 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2675 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2679 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2680 * @hw: pointer to the HW struct
2681 * @vsi_handle: VSI handle
2682 * @vsi: VSI context pointer
2684 * save the VSI context entry for a given VSI handle
2687 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2689 hw->vsi_ctx[vsi_handle] = vsi;
2693 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2694 * @hw: pointer to the HW struct
2695 * @vsi_handle: VSI handle
2697 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2699 struct ice_vsi_ctx *vsi;
2702 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2705 ice_for_each_traffic_class(i) {
2706 if (vsi->lan_q_ctx[i]) {
2707 ice_free(hw, vsi->lan_q_ctx[i]);
2708 vsi->lan_q_ctx[i] = NULL;
2714 * ice_clear_vsi_ctx - clear the VSI context entry
2715 * @hw: pointer to the HW struct
2716 * @vsi_handle: VSI handle
2718 * clear the VSI context entry
2720 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2722 struct ice_vsi_ctx *vsi;
2724 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2726 ice_clear_vsi_q_ctx(hw, vsi_handle);
2728 hw->vsi_ctx[vsi_handle] = NULL;
2733 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2734 * @hw: pointer to the HW struct
2736 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2740 for (i = 0; i < ICE_MAX_VSI; i++)
2741 ice_clear_vsi_ctx(hw, i);
2745 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2746 * @hw: pointer to the HW struct
2747 * @vsi_handle: unique VSI handle provided by drivers
2748 * @vsi_ctx: pointer to a VSI context struct
2749 * @cd: pointer to command details structure or NULL
2751 * Add a VSI context to the hardware also add it into the VSI handle list.
2752 * If this function gets called after reset for existing VSIs then update
2753 * with the new HW VSI number in the corresponding VSI handle list entry.
2756 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2757 struct ice_sq_cd *cd)
2759 struct ice_vsi_ctx *tmp_vsi_ctx;
2760 enum ice_status status;
2762 if (vsi_handle >= ICE_MAX_VSI)
2763 return ICE_ERR_PARAM;
2764 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2767 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2769 /* Create a new VSI context */
2770 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2771 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2773 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2774 return ICE_ERR_NO_MEMORY;
2776 *tmp_vsi_ctx = *vsi_ctx;
2778 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2780 /* update with new HW VSI num */
2781 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2788 * ice_free_vsi- free VSI context from hardware and VSI handle list
2789 * @hw: pointer to the HW struct
2790 * @vsi_handle: unique VSI handle
2791 * @vsi_ctx: pointer to a VSI context struct
2792 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2793 * @cd: pointer to command details structure or NULL
2795 * Free VSI context info from hardware as well as from VSI handle list
2798 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2799 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2801 enum ice_status status;
2803 if (!ice_is_vsi_valid(hw, vsi_handle))
2804 return ICE_ERR_PARAM;
2805 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2806 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2808 ice_clear_vsi_ctx(hw, vsi_handle);
2814 * @hw: pointer to the HW struct
2815 * @vsi_handle: unique VSI handle
2816 * @vsi_ctx: pointer to a VSI context struct
2817 * @cd: pointer to command details structure or NULL
2819 * Update VSI context in the hardware
2822 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2823 struct ice_sq_cd *cd)
2825 if (!ice_is_vsi_valid(hw, vsi_handle))
2826 return ICE_ERR_PARAM;
2827 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2828 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2832 * ice_aq_get_vsi_params
2833 * @hw: pointer to the HW struct
2834 * @vsi_ctx: pointer to a VSI context struct
2835 * @cd: pointer to command details structure or NULL
2837 * Get VSI context info from hardware (0x0212)
2840 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2841 struct ice_sq_cd *cd)
2843 struct ice_aqc_add_get_update_free_vsi *cmd;
2844 struct ice_aqc_get_vsi_resp *resp;
2845 struct ice_aq_desc desc;
2846 enum ice_status status;
2848 cmd = &desc.params.vsi_cmd;
2849 resp = &desc.params.get_vsi_resp;
2851 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2853 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2855 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2856 sizeof(vsi_ctx->info), cd);
2858 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2860 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2861 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2868 * ice_aq_add_update_mir_rule - add/update a mirror rule
2869 * @hw: pointer to the HW struct
2870 * @rule_type: Rule Type
2871 * @dest_vsi: VSI number to which packets will be mirrored
2872 * @count: length of the list
2873 * @mr_buf: buffer for list of mirrored VSI numbers
2874 * @cd: pointer to command details structure or NULL
2877 * Add/Update Mirror Rule (0x260).
2880 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2881 u16 count, struct ice_mir_rule_buf *mr_buf,
2882 struct ice_sq_cd *cd, u16 *rule_id)
2884 struct ice_aqc_add_update_mir_rule *cmd;
2885 struct ice_aq_desc desc;
2886 enum ice_status status;
2887 __le16 *mr_list = NULL;
2890 switch (rule_type) {
2891 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2892 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2893 /* Make sure count and mr_buf are set for these rule_types */
2894 if (!(count && mr_buf))
2895 return ICE_ERR_PARAM;
2897 buf_size = count * sizeof(__le16);
2898 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2900 return ICE_ERR_NO_MEMORY;
2902 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2903 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2904 /* Make sure count and mr_buf are not set for these
2907 if (count || mr_buf)
2908 return ICE_ERR_PARAM;
2911 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2912 return ICE_ERR_OUT_OF_RANGE;
2915 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2917 /* Pre-process 'mr_buf' items for add/update of virtual port
2918 * ingress/egress mirroring (but not physical port ingress/egress
2924 for (i = 0; i < count; i++) {
2927 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2929 /* Validate specified VSI number, make sure it is less
2930 * than ICE_MAX_VSI, if not return with error.
2932 if (id >= ICE_MAX_VSI) {
2933 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2935 ice_free(hw, mr_list);
2936 return ICE_ERR_OUT_OF_RANGE;
2939 /* add VSI to mirror rule */
2942 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2943 else /* remove VSI from mirror rule */
2944 mr_list[i] = CPU_TO_LE16(id);
2948 cmd = &desc.params.add_update_rule;
2949 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2950 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2951 ICE_AQC_RULE_ID_VALID_M);
2952 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2953 cmd->num_entries = CPU_TO_LE16(count);
2954 cmd->dest = CPU_TO_LE16(dest_vsi);
2956 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2958 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2960 ice_free(hw, mr_list);
2966 * ice_aq_delete_mir_rule - delete a mirror rule
2967 * @hw: pointer to the HW struct
2968 * @rule_id: Mirror rule ID (to be deleted)
2969 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2970 * otherwise it is returned to the shared pool
2971 * @cd: pointer to command details structure or NULL
2973 * Delete Mirror Rule (0x261).
2976 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2977 struct ice_sq_cd *cd)
2979 struct ice_aqc_delete_mir_rule *cmd;
2980 struct ice_aq_desc desc;
2982 /* rule_id should be in the range 0...63 */
2983 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2984 return ICE_ERR_OUT_OF_RANGE;
2986 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2988 cmd = &desc.params.del_rule;
2989 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2990 cmd->rule_id = CPU_TO_LE16(rule_id);
2993 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2995 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2999 * ice_aq_alloc_free_vsi_list
3000 * @hw: pointer to the HW struct
3001 * @vsi_list_id: VSI list ID returned or used for lookup
3002 * @lkup_type: switch rule filter lookup type
3003 * @opc: switch rules population command type - pass in the command opcode
3005 * allocates or free a VSI list resource
3007 static enum ice_status
3008 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3009 enum ice_sw_lkup_type lkup_type,
3010 enum ice_adminq_opc opc)
3012 struct ice_aqc_alloc_free_res_elem *sw_buf;
3013 struct ice_aqc_res_elem *vsi_ele;
3014 enum ice_status status;
3017 buf_len = ice_struct_size(sw_buf, elem, 1);
3018 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3020 return ICE_ERR_NO_MEMORY;
3021 sw_buf->num_elems = CPU_TO_LE16(1);
3023 if (lkup_type == ICE_SW_LKUP_MAC ||
3024 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3025 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3026 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3027 lkup_type == ICE_SW_LKUP_PROMISC ||
3028 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3029 lkup_type == ICE_SW_LKUP_LAST) {
3030 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3031 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3033 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3035 status = ICE_ERR_PARAM;
3036 goto ice_aq_alloc_free_vsi_list_exit;
3039 if (opc == ice_aqc_opc_free_res)
3040 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3042 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3044 goto ice_aq_alloc_free_vsi_list_exit;
3046 if (opc == ice_aqc_opc_alloc_res) {
3047 vsi_ele = &sw_buf->elem[0];
3048 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3051 ice_aq_alloc_free_vsi_list_exit:
3052 ice_free(hw, sw_buf);
3057 * ice_aq_set_storm_ctrl - Sets storm control configuration
3058 * @hw: pointer to the HW struct
3059 * @bcast_thresh: represents the upper threshold for broadcast storm control
3060 * @mcast_thresh: represents the upper threshold for multicast storm control
3061 * @ctl_bitmask: storm control knobs
3063 * Sets the storm control configuration (0x0280)
3066 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3069 struct ice_aqc_storm_cfg *cmd;
3070 struct ice_aq_desc desc;
3072 cmd = &desc.params.storm_conf;
3074 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3076 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3077 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3078 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3080 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3084 * ice_aq_get_storm_ctrl - gets storm control configuration
3085 * @hw: pointer to the HW struct
3086 * @bcast_thresh: represents the upper threshold for broadcast storm control
3087 * @mcast_thresh: represents the upper threshold for multicast storm control
3088 * @ctl_bitmask: storm control knobs
3090 * Gets the storm control configuration (0x0281)
3093 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3096 enum ice_status status;
3097 struct ice_aq_desc desc;
3099 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3101 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3103 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3106 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3109 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3112 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3119 * ice_aq_sw_rules - add/update/remove switch rules
3120 * @hw: pointer to the HW struct
3121 * @rule_list: pointer to switch rule population list
3122 * @rule_list_sz: total size of the rule list in bytes
3123 * @num_rules: number of switch rules in the rule_list
3124 * @opc: switch rules population command type - pass in the command opcode
3125 * @cd: pointer to command details structure or NULL
3127 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3129 static enum ice_status
3130 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3131 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3133 struct ice_aq_desc desc;
3134 enum ice_status status;
3136 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3138 if (opc != ice_aqc_opc_add_sw_rules &&
3139 opc != ice_aqc_opc_update_sw_rules &&
3140 opc != ice_aqc_opc_remove_sw_rules)
3141 return ICE_ERR_PARAM;
3143 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3145 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3146 desc.params.sw_rules.num_rules_fltr_entry_index =
3147 CPU_TO_LE16(num_rules);
3148 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3149 if (opc != ice_aqc_opc_add_sw_rules &&
3150 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3151 status = ICE_ERR_DOES_NOT_EXIST;
3157 * ice_aq_add_recipe - add switch recipe
3158 * @hw: pointer to the HW struct
3159 * @s_recipe_list: pointer to switch rule population list
3160 * @num_recipes: number of switch recipes in the list
3161 * @cd: pointer to command details structure or NULL
3166 ice_aq_add_recipe(struct ice_hw *hw,
3167 struct ice_aqc_recipe_data_elem *s_recipe_list,
3168 u16 num_recipes, struct ice_sq_cd *cd)
3170 struct ice_aqc_add_get_recipe *cmd;
3171 struct ice_aq_desc desc;
3174 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3175 cmd = &desc.params.add_get_recipe;
3176 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3178 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3179 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3181 buf_size = num_recipes * sizeof(*s_recipe_list);
3183 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3187 * ice_aq_get_recipe - get switch recipe
3188 * @hw: pointer to the HW struct
3189 * @s_recipe_list: pointer to switch rule population list
3190 * @num_recipes: pointer to the number of recipes (input and output)
3191 * @recipe_root: root recipe number of recipe(s) to retrieve
3192 * @cd: pointer to command details structure or NULL
3196 * On input, *num_recipes should equal the number of entries in s_recipe_list.
3197 * On output, *num_recipes will equal the number of entries returned in
3200 * The caller must supply enough space in s_recipe_list to hold all possible
3201 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3204 ice_aq_get_recipe(struct ice_hw *hw,
3205 struct ice_aqc_recipe_data_elem *s_recipe_list,
3206 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3208 struct ice_aqc_add_get_recipe *cmd;
3209 struct ice_aq_desc desc;
3210 enum ice_status status;
3213 if (*num_recipes != ICE_MAX_NUM_RECIPES)
3214 return ICE_ERR_PARAM;
3216 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3217 cmd = &desc.params.add_get_recipe;
3218 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3220 cmd->return_index = CPU_TO_LE16(recipe_root);
3221 cmd->num_sub_recipes = 0;
3223 buf_size = *num_recipes * sizeof(*s_recipe_list);
3225 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3226 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3232 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3233 * @hw: pointer to the HW struct
3234 * @params: parameters used to update the default recipe
3236 * This function only supports updating default recipes and it only supports
3237 * updating a single recipe based on the lkup_idx at a time.
3239 * This is done as a read-modify-write operation. First, get the current recipe
3240 * contents based on the recipe's ID. Then modify the field vector index and
3241 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3242 * the pre-existing recipe with the modifications.
3245 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3246 struct ice_update_recipe_lkup_idx_params *params)
3248 struct ice_aqc_recipe_data_elem *rcp_list;
3249 u16 num_recps = ICE_MAX_NUM_RECIPES;
3250 enum ice_status status;
3252 rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3254 return ICE_ERR_NO_MEMORY;
3256 /* read current recipe list from firmware */
3257 rcp_list->recipe_indx = params->rid;
3258 status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3260 ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3261 params->rid, status);
3265 /* only modify existing recipe's lkup_idx and mask if valid, while
3266 * leaving all other fields the same, then update the recipe firmware
3268 rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3269 if (params->mask_valid)
3270 rcp_list->content.mask[params->lkup_idx] =
3271 CPU_TO_LE16(params->mask);
3273 if (params->ignore_valid)
3274 rcp_list->content.lkup_indx[params->lkup_idx] |=
3275 ICE_AQ_RECIPE_LKUP_IGNORE;
3277 status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3279 ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3280 params->rid, params->lkup_idx, params->fv_idx,
3281 params->mask, params->mask_valid ? "true" : "false",
3285 ice_free(hw, rcp_list);
3290 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3291 * @hw: pointer to the HW struct
3292 * @profile_id: package profile ID to associate the recipe with
3293 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3294 * @cd: pointer to command details structure or NULL
3295 * Recipe to profile association (0x0291)
3298 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3299 struct ice_sq_cd *cd)
3301 struct ice_aqc_recipe_to_profile *cmd;
3302 struct ice_aq_desc desc;
3304 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3305 cmd = &desc.params.recipe_to_profile;
3306 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3307 cmd->profile_id = CPU_TO_LE16(profile_id);
3308 /* Set the recipe ID bit in the bitmask to let the device know which
3309 * profile we are associating the recipe to
3311 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3312 ICE_NONDMA_TO_NONDMA);
3314 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3318 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3319 * @hw: pointer to the HW struct
3320 * @profile_id: package profile ID to associate the recipe with
3321 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3322 * @cd: pointer to command details structure or NULL
3323 * Associate profile ID with given recipe (0x0293)
3326 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3327 struct ice_sq_cd *cd)
3329 struct ice_aqc_recipe_to_profile *cmd;
3330 struct ice_aq_desc desc;
3331 enum ice_status status;
3333 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3334 cmd = &desc.params.recipe_to_profile;
3335 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3336 cmd->profile_id = CPU_TO_LE16(profile_id);
3338 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3340 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3341 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3347 * ice_alloc_recipe - add recipe resource
3348 * @hw: pointer to the hardware structure
3349 * @rid: recipe ID returned as response to AQ call
3351 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3353 struct ice_aqc_alloc_free_res_elem *sw_buf;
3354 enum ice_status status;
3357 buf_len = ice_struct_size(sw_buf, elem, 1);
3358 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3360 return ICE_ERR_NO_MEMORY;
3362 sw_buf->num_elems = CPU_TO_LE16(1);
3363 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3364 ICE_AQC_RES_TYPE_S) |
3365 ICE_AQC_RES_TYPE_FLAG_SHARED);
3366 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3367 ice_aqc_opc_alloc_res, NULL);
3369 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3370 ice_free(hw, sw_buf);
3375 /* ice_init_port_info - Initialize port_info with switch configuration data
3376 * @pi: pointer to port_info
3377 * @vsi_port_num: VSI number or port number
3378 * @type: Type of switch element (port or VSI)
3379 * @swid: switch ID of the switch the element is attached to
3380 * @pf_vf_num: PF or VF number
3381 * @is_vf: true if the element is a VF, false otherwise
3384 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3385 u16 swid, u16 pf_vf_num, bool is_vf)
3388 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3389 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3391 pi->pf_vf_num = pf_vf_num;
3393 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3394 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3397 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3402 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3403 * @hw: pointer to the hardware structure
3405 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3407 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3408 enum ice_status status;
3415 num_total_ports = 1;
3417 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3418 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3421 return ICE_ERR_NO_MEMORY;
3423 /* Multiple calls to ice_aq_get_sw_cfg may be required
3424 * to get all the switch configuration information. The need
3425 * for additional calls is indicated by ice_aq_get_sw_cfg
3426 * writing a non-zero value in req_desc
3429 struct ice_aqc_get_sw_cfg_resp_elem *ele;
3431 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3432 &req_desc, &num_elems, NULL);
3437 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3438 u16 pf_vf_num, swid, vsi_port_num;
3442 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3443 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3445 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3446 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3448 swid = LE16_TO_CPU(ele->swid);
3450 if (LE16_TO_CPU(ele->pf_vf_num) &
3451 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3454 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3455 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3458 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3459 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3460 if (j == num_total_ports) {
3461 ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3462 status = ICE_ERR_CFG;
3465 ice_init_port_info(hw->port_info,
3466 vsi_port_num, res_type, swid,
3474 } while (req_desc && !status);
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
/* NOTE(review): this block appears truncated by extraction — the opening
 * brace / lan_en-lb_en initialization lines seen in upstream versions of this
 * function are missing here. Comments below annotate only the visible logic;
 * confirm against the upstream shared code before relying on them.
 */
/* Rx filters forwarding to a VSI/VSI-list for the "last" (default) lookup
 * presumably enable LAN forwarding — the assignment line itself is not
 * visible here; TODO confirm.
 */
	if ((fi->flag & ICE_FLTR_RX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
	    fi->lkup_type == ICE_SW_LKUP_LAST)
/* Tx-side filters that forward to a VSI, VSI list, queue, or queue group */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		/* directional lookups, VLAN, or non-unicast MAC/MAC_VLAN */
		if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
		    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
		    fi->lkup_type == ICE_SW_LKUP_DFLT ||
		    fi->lkup_type == ICE_SW_LKUP_VLAN ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3545 * ice_fill_sw_rule - Helper function to fill switch rule structure
3546 * @hw: pointer to the hardware structure
3547 * @f_info: entry containing packet forwarding information
3548 * @s_rule: switch rule structure to be filled in based on mac_entry
3549 * @opc: switch rules population command type - pass in the command opcode
3552 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3553 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* vlan_id > ICE_MAX_VLAN_ID acts as a "no VLAN to program" sentinel;
 * see the range check before the TCI write below.
 */
3555 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3556 u16 vlan_tpid = ICE_ETH_P_8021Q;
/* Removal only needs the rule index; action and header are cleared. */
3564 if (opc == ice_aqc_opc_remove_sw_rules) {
3565 s_rule->pdata.lkup_tx_rx.act = 0;
3566 s_rule->pdata.lkup_tx_rx.index =
3567 CPU_TO_LE16(f_info->fltr_rule_id);
3568 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3572 eth_hdr_sz = sizeof(dummy_eth_header);
3573 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3575 /* initialize the ether header with a dummy header */
3576 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
/* Populate lb_en/lan_en on f_info; they are folded into 'act' below. */
3577 ice_fill_sw_info(hw, f_info);
/* Encode the forwarding action. VLAN lookups get the VSI id/list but not
 * the VSI-forwarding bits (they become prune actions, see the lookup
 * switch further down).
 */
3579 switch (f_info->fltr_act) {
3580 case ICE_FWD_TO_VSI:
3581 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3582 ICE_SINGLE_ACT_VSI_ID_M;
3583 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3584 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3585 ICE_SINGLE_ACT_VALID_BIT;
3587 case ICE_FWD_TO_VSI_LIST:
3588 act |= ICE_SINGLE_ACT_VSI_LIST;
3589 act |= (f_info->fwd_id.vsi_list_id <<
3590 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3591 ICE_SINGLE_ACT_VSI_LIST_ID_M;
3592 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3593 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3594 ICE_SINGLE_ACT_VALID_BIT;
3597 act |= ICE_SINGLE_ACT_TO_Q;
3598 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3599 ICE_SINGLE_ACT_Q_INDEX_M;
3601 case ICE_DROP_PACKET:
3602 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3603 ICE_SINGLE_ACT_VALID_BIT;
3605 case ICE_FWD_TO_QGRP:
/* Queue-group size is programmed as a power-of-two region exponent. */
3606 q_rgn = f_info->qgrp_size > 0 ?
3607 (u8)ice_ilog2(f_info->qgrp_size) : 0;
3608 act |= ICE_SINGLE_ACT_TO_Q;
3609 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3610 ICE_SINGLE_ACT_Q_INDEX_M;
3611 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3612 ICE_SINGLE_ACT_Q_REGION_M;
/* lb_en/lan_en computed by ice_fill_sw_info() above. */
3619 act |= ICE_SINGLE_ACT_LB_ENABLE;
3621 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Fill the dummy Ethernet header fields that this lookup type keys on. */
3623 switch (f_info->lkup_type) {
3624 case ICE_SW_LKUP_MAC:
3625 daddr = f_info->l_data.mac.mac_addr;
3627 case ICE_SW_LKUP_VLAN:
3628 vlan_id = f_info->l_data.vlan.vlan_id;
3629 if (f_info->l_data.vlan.tpid_valid)
3630 vlan_tpid = f_info->l_data.vlan.tpid;
/* VLAN rules forwarding to a VSI/list are programmed as prune
 * actions in both directions rather than plain forwarding.
 */
3631 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3632 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3633 act |= ICE_SINGLE_ACT_PRUNE;
3634 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3637 case ICE_SW_LKUP_ETHERTYPE_MAC:
3638 daddr = f_info->l_data.ethertype_mac.mac_addr;
3640 case ICE_SW_LKUP_ETHERTYPE:
3641 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3642 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3644 case ICE_SW_LKUP_MAC_VLAN:
3645 daddr = f_info->l_data.mac_vlan.mac_addr;
3646 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3648 case ICE_SW_LKUP_PROMISC_VLAN:
3649 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3651 case ICE_SW_LKUP_PROMISC:
3652 daddr = f_info->l_data.mac_vlan.mac_addr;
3658 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3659 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3660 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3662 /* Recipe set depending on lookup type */
3663 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3664 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3665 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3668 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3669 ICE_NONDMA_TO_NONDMA);
/* Program VLAN TCI + TPID only when a valid (<= 0xFFF) VLAN ID was set. */
3671 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3672 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3673 *off = CPU_TO_BE16(vlan_id);
3674 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3675 *off = CPU_TO_BE16(vlan_tpid);
3678 /* Create the switch rule with the final dummy Ethernet header */
/* Updates keep the existing header length; add/remove set it here. */
3679 if (opc != ice_aqc_opc_update_sw_rules)
3680 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3684 * ice_add_marker_act
3685 * @hw: pointer to the hardware structure
3686 * @m_ent: the management entry for which sw marker needs to be added
3687 * @sw_marker: sw marker to tag the Rx descriptor with
3688 * @l_id: large action resource ID
3690 * Create a large action to hold software marker and update the switch rule
3691 * entry pointed by m_ent with newly created large action
3693 static enum ice_status
3694 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3695 u16 sw_marker, u16 l_id)
3697 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3698 /* For software marker we need 3 large actions
3699 * 1. FWD action: FWD TO VSI or VSI LIST
3700 * 2. GENERIC VALUE action to hold the profile ID
3701 * 3. GENERIC VALUE action to hold the software marker ID
3703 const u16 num_lg_acts = 3;
3704 enum ice_status status;
/* Software markers are only supported on MAC lookup rules. */
3710 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3711 return ICE_ERR_PARAM;
3713 /* Create two back-to-back switch rules and submit them to the HW using
3714 * one memory buffer:
3718 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3719 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3720 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3722 return ICE_ERR_NO_MEMORY;
/* Second rule (the lookup rule update) lives right after the large
 * action in the same buffer.
 */
3724 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3726 /* Fill in the first switch rule i.e. large action */
3727 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3728 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3729 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3731 /* First action VSI forwarding or VSI list forwarding depending on how
/* vsi_count > 1 means the entry already forwards to a VSI list. */
3734 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3735 m_ent->fltr_info.fwd_id.hw_vsi_id;
3737 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3738 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3739 if (m_ent->vsi_count > 1)
3740 act |= ICE_LG_ACT_VSI_LIST;
3741 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3743 /* Second action descriptor type */
3744 act = ICE_LG_ACT_GENERIC;
3746 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3747 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
/* Select the Rx-descriptor profile-index offset for the marker write. */
3749 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3750 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3752 /* Third action Marker value */
3753 act |= ICE_LG_ACT_GENERIC;
3754 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3755 ICE_LG_ACT_GENERIC_VALUE_M;
3757 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3759 /* call the fill switch rule to fill the lookup Tx Rx structure */
3760 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3761 ice_aqc_opc_update_sw_rules);
3763 /* Update the action to point to the large action ID */
3764 rx_tx->pdata.lkup_tx_rx.act =
3765 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3766 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3767 ICE_SINGLE_ACT_PTR_VAL_M));
3769 /* Use the filter rule ID of the previously created rule with single
3770 * act. Once the update happens, hardware will treat this as large
3773 rx_tx->pdata.lkup_tx_rx.index =
3774 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + lookup update) in one AQ call. */
3776 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3777 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keep the large action index and marker on the management entry. */
3779 m_ent->lg_act_idx = l_id;
3780 m_ent->sw_marker_id = sw_marker;
3783 ice_free(hw, lg_act);
3788 * ice_add_counter_act - add/update filter rule with counter action
3789 * @hw: pointer to the hardware structure
3790 * @m_ent: the management entry for which counter needs to be added
3791 * @counter_id: VLAN counter ID returned as part of allocate resource
3792 * @l_id: large action resource ID
3794 static enum ice_status
3795 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3796 u16 counter_id, u16 l_id)
3798 struct ice_aqc_sw_rules_elem *lg_act;
3799 struct ice_aqc_sw_rules_elem *rx_tx;
3800 enum ice_status status;
3801 /* 2 actions will be added while adding a large action counter */
3802 const int num_acts = 2;
/* Counter large actions are only supported on MAC lookup rules. */
3809 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3810 return ICE_ERR_PARAM;
3812 /* Create two back-to-back switch rules and submit them to the HW using
3813 * one memory buffer:
3817 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3818 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3819 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3821 return ICE_ERR_NO_MEMORY;
/* Lookup-rule update element follows the large action in the buffer. */
3823 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3825 /* Fill in the first switch rule i.e. large action */
3826 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3827 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3828 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3830 /* First action VSI forwarding or VSI list forwarding depending on how
/* vsi_count > 1 means the entry already forwards to a VSI list. */
3833 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3834 m_ent->fltr_info.fwd_id.hw_vsi_id;
3836 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3837 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3838 ICE_LG_ACT_VSI_LIST_ID_M;
3839 if (m_ent->vsi_count > 1)
3840 act |= ICE_LG_ACT_VSI_LIST;
3841 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3843 /* Second action counter ID */
3844 act = ICE_LG_ACT_STAT_COUNT;
3845 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3846 ICE_LG_ACT_STAT_COUNT_M;
3847 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3849 /* call the fill switch rule to fill the lookup Tx Rx structure */
3850 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3851 ice_aqc_opc_update_sw_rules);
/* Redirect the existing lookup rule to the large action via ACT_PTR. */
3853 act = ICE_SINGLE_ACT_PTR;
3854 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3855 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3857 /* Use the filter rule ID of the previously created rule with single
3858 * act. Once the update happens, hardware will treat this as large
3861 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3862 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules (large action + lookup update) in one AQ call. */
3864 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3865 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keep the large action index and counter on the management entry. */
3867 m_ent->lg_act_idx = l_id;
3868 m_ent->counter_index = counter_id;
3871 ice_free(hw, lg_act);
3876 * ice_create_vsi_list_map
3877 * @hw: pointer to the hardware structure
3878 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3879 * @num_vsi: number of VSI handles in the array
3880 * @vsi_list_id: VSI list ID generated as part of allocate resource
3882 * Helper function to create a new entry of VSI list ID to VSI mapping
3883 * using the given VSI list ID
3885 static struct ice_vsi_list_map_info *
3886 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3889 struct ice_switch_info *sw = hw->switch_info;
3890 struct ice_vsi_list_map_info *v_map;
3893 v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3897 v_map->vsi_list_id = vsi_list_id;
/* Record membership of each VSI handle in the map's bitmap. */
3899 for (i = 0; i < num_vsi; i++)
3900 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list so it can be found later. */
3902 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3907 * ice_update_vsi_list_rule
3908 * @hw: pointer to the hardware structure
3909 * @vsi_handle_arr: array of VSI handles to form a VSI list
3910 * @num_vsi: number of VSI handles in the array
3911 * @vsi_list_id: VSI list ID generated as part of allocate resource
3912 * @remove: Boolean value to indicate if this is a remove action
3913 * @opc: switch rules population command type - pass in the command opcode
3914 * @lkup_type: lookup type of the filter
3916 * Call AQ command to add a new switch rule or update existing switch rule
3917 * using the given VSI list ID
3919 static enum ice_status
3920 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3921 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3922 enum ice_sw_lkup_type lkup_type)
3924 struct ice_aqc_sw_rules_elem *s_rule;
3925 enum ice_status status;
3931 return ICE_ERR_PARAM;
/* Map the lookup type to the AQ rule type: most lookups use VSI list
 * set/clear; VLAN lookups use prune list set/clear instead.
 */
3933 if (lkup_type == ICE_SW_LKUP_MAC ||
3934 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3935 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3936 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3937 lkup_type == ICE_SW_LKUP_PROMISC ||
3938 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3939 lkup_type == ICE_SW_LKUP_LAST)
3940 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3941 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3942 else if (lkup_type == ICE_SW_LKUP_VLAN)
3943 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3944 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3946 return ICE_ERR_PARAM;
3948 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3949 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3951 return ICE_ERR_NO_MEMORY;
/* Translate each software VSI handle to its hardware VSI number. */
3952 for (i = 0; i < num_vsi; i++) {
3953 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3954 status = ICE_ERR_PARAM;
3957 /* AQ call requires hw_vsi_id(s) */
3958 s_rule->pdata.vsi_list.vsi[i] =
3959 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3962 s_rule->type = CPU_TO_LE16(rule_type);
3963 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3964 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3966 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3969 ice_free(hw, s_rule);
3974 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3975 * @hw: pointer to the HW struct
3976 * @vsi_handle_arr: array of VSI handles to form a VSI list
3977 * @num_vsi: number of VSI handles in the array
3978 * @vsi_list_id: stores the ID of the VSI list to be created
3979 * @lkup_type: switch rule filter's lookup type
3981 static enum ice_status
3982 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3983 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3985 enum ice_status status;
/* Allocate a VSI list resource ID from firmware first. */
3987 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3988 ice_aqc_opc_alloc_res);
3992 /* Update the newly created VSI list to include the specified VSIs */
3993 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3994 *vsi_list_id, false,
3995 ice_aqc_opc_add_sw_rules, lkup_type);
3999 * ice_create_pkt_fwd_rule
4000 * @hw: pointer to the hardware structure
4001 * @recp_list: corresponding filter management list
4002 * @f_entry: entry containing packet forwarding information
4004 * Create switch rule with given filter information and add an entry
4005 * to the corresponding filter management list to track this switch rule
4008 static enum ice_status
4009 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4010 struct ice_fltr_list_entry *f_entry)
4012 struct ice_fltr_mgmt_list_entry *fm_entry;
4013 struct ice_aqc_sw_rules_elem *s_rule;
4014 enum ice_status status;
4016 s_rule = (struct ice_aqc_sw_rules_elem *)
4017 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4019 return ICE_ERR_NO_MEMORY;
4020 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4021 ice_malloc(hw, sizeof(*fm_entry));
4023 status = ICE_ERR_NO_MEMORY;
4024 goto ice_create_pkt_fwd_rule_exit;
4027 fm_entry->fltr_info = f_entry->fltr_info;
4029 /* Initialize all the fields for the management entry */
4030 fm_entry->vsi_count = 1;
4031 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4032 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4033 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4035 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4036 ice_aqc_opc_add_sw_rules);
4038 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4039 ice_aqc_opc_add_sw_rules, NULL);
/* AQ failure: drop the management entry; s_rule is freed at exit. */
4041 ice_free(hw, fm_entry);
4042 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule ID in the lookup index field;
 * propagate it to both the caller's entry and the management entry.
 */
4045 f_entry->fltr_info.fltr_rule_id =
4046 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4047 fm_entry->fltr_info.fltr_rule_id =
4048 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4050 /* The book keeping entries will get removed when base driver
4051 * calls remove filter AQ command
4053 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4055 ice_create_pkt_fwd_rule_exit:
4056 ice_free(hw, s_rule);
4061 * ice_update_pkt_fwd_rule
4062 * @hw: pointer to the hardware structure
4063 * @f_info: filter information for switch rule
4065 * Call AQ command to update a previously created switch rule with a
4068 static enum ice_status
4069 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4071 struct ice_aqc_sw_rules_elem *s_rule;
4072 enum ice_status status;
4074 s_rule = (struct ice_aqc_sw_rules_elem *)
4075 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4077 return ICE_ERR_NO_MEMORY;
4079 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its firmware-assigned rule ID. */
4081 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4083 /* Update switch rule with new rule set to forward VSI list */
4084 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4085 ice_aqc_opc_update_sw_rules, NULL);
4087 ice_free(hw, s_rule);
4092 * ice_update_sw_rule_bridge_mode
4093 * @hw: pointer to the HW struct
4095 * Updates unicast switch filter rules based on VEB/VEPA mode
4097 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4099 struct ice_switch_info *sw = hw->switch_info;
4100 struct ice_fltr_mgmt_list_entry *fm_entry;
4101 enum ice_status status = ICE_SUCCESS;
4102 struct LIST_HEAD_TYPE *rule_head;
4103 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-recipe rules are affected by a VEB/VEPA mode change. */
4105 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4106 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4108 ice_acquire_lock(rule_lock);
4109 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4111 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4112 u8 *addr = fi->l_data.mac.mac_addr;
4114 /* Update unicast Tx rules to reflect the selected
4117 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4118 (fi->fltr_act == ICE_FWD_TO_VSI ||
4119 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4120 fi->fltr_act == ICE_FWD_TO_Q ||
4121 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-program the rule; ice_fill_sw_rule() recomputes the
 * lb_en/lan_en bits from the current bridge mode.
 */
4122 status = ice_update_pkt_fwd_rule(hw, fi);
4128 ice_release_lock(rule_lock);
4134 * ice_add_update_vsi_list
4135 * @hw: pointer to the hardware structure
4136 * @m_entry: pointer to current filter management list entry
4137 * @cur_fltr: filter information from the book keeping entry
4138 * @new_fltr: filter information with the new VSI to be added
4140 * Call AQ command to add or update previously created VSI list with new VSI.
4142 * Helper function to do book keeping associated with adding filter information
4143 * The algorithm to do the book keeping is described below :
4144 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4145 * if only one VSI has been added till now
4146 * Allocate a new VSI list and add two VSIs
4147 * to this list using switch rule command
4148 * Update the previously created switch rule with the
4149 * newly created VSI list ID
4150 * if a VSI list was previously created
4151 * Add the new VSI to the previously created VSI list set
4152 * using the update switch rule command
4154 static enum ice_status
4155 ice_add_update_vsi_list(struct ice_hw *hw,
4156 struct ice_fltr_mgmt_list_entry *m_entry,
4157 struct ice_fltr_info *cur_fltr,
4158 struct ice_fltr_info *new_fltr)
4160 enum ice_status status = ICE_SUCCESS;
4161 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot share a rule via a VSI list. */
4163 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4164 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4165 return ICE_ERR_NOT_IMPL;
4167 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4168 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4169 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4170 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4171 return ICE_ERR_NOT_IMPL;
4173 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4174 /* Only one entry existed in the mapping and it was not already
4175 * a part of a VSI list. So, create a VSI list with the old and
4178 struct ice_fltr_info tmp_fltr;
4179 u16 vsi_handle_arr[2];
4181 /* A rule already exists with the new VSI being added */
4182 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4183 return ICE_ERR_ALREADY_EXISTS;
4185 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4186 vsi_handle_arr[1] = new_fltr->vsi_handle;
4187 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4189 new_fltr->lkup_type);
4193 tmp_fltr = *new_fltr;
4194 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4195 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4196 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4197 /* Update the previous switch rule of "MAC forward to VSI" to
4198 * "MAC fwd to VSI list"
4200 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the HW rule. */
4204 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4205 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4206 m_entry->vsi_list_info =
4207 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4210 if (!m_entry->vsi_list_info)
4211 return ICE_ERR_NO_MEMORY;
4213 /* If this entry was large action then the large action needs
4214 * to be updated to point to FWD to VSI list
4216 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4218 ice_add_marker_act(hw, m_entry,
4219 m_entry->sw_marker_id,
4220 m_entry->lg_act_idx);
4222 u16 vsi_handle = new_fltr->vsi_handle;
4223 enum ice_adminq_opc opcode;
4225 if (!m_entry->vsi_list_info)
4228 /* A rule already exists with the new VSI being added */
4229 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4232 /* Update the previously created VSI list set with
4233 * the new VSI ID passed in
4235 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4236 opcode = ice_aqc_opc_update_sw_rules;
4238 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4239 vsi_list_id, false, opcode,
4240 new_fltr->lkup_type);
4241 /* update VSI list mapping info with new VSI ID */
4243 ice_set_bit(vsi_handle,
4244 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter. */
4247 m_entry->vsi_count++;
4252 * ice_find_rule_entry - Search a rule entry
4253 * @list_head: head of rule list
4254 * @f_info: rule information
4256 * Helper function to search for a given rule entry
4257 * Returns pointer to entry storing the rule if found
4259 static struct ice_fltr_mgmt_list_entry *
4260 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4261 struct ice_fltr_info *f_info)
4263 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on lookup data (entire l_data union) and the Rx/Tx flag. */
4265 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4267 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4268 sizeof(f_info->l_data)) &&
4269 f_info->flag == list_itr->fltr_info.flag) {
4278 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4279 * @recp_list: VSI lists needs to be searched
4280 * @vsi_handle: VSI handle to be found in VSI list
4281 * @vsi_list_id: VSI list ID found containing vsi_handle
4283 * Helper function to search a VSI list with single entry containing given VSI
4284 * handle element. This can be extended further to search VSI list with more
4285 * than 1 vsi_count. Returns pointer to VSI list entry if found.
4287 static struct ice_vsi_list_map_info *
4288 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4291 struct ice_vsi_list_map_info *map_info = NULL;
4292 struct LIST_HEAD_TYPE *list_head;
4294 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes store entries of a different list-node type,
 * so the walk is duplicated for each entry type.
 */
4295 if (recp_list->adv_rule) {
4296 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4298 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4299 ice_adv_fltr_mgmt_list_entry,
4301 if (list_itr->vsi_list_info) {
4302 map_info = list_itr->vsi_list_info;
4303 if (ice_is_bit_set(map_info->vsi_map,
4305 *vsi_list_id = map_info->vsi_list_id;
4311 struct ice_fltr_mgmt_list_entry *list_itr;
4313 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4314 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires vsi_count == 1,
 * per the "VSI count 1" contract in the function header.
 */
4316 if (list_itr->vsi_count == 1 &&
4317 list_itr->vsi_list_info) {
4318 map_info = list_itr->vsi_list_info;
4319 if (ice_is_bit_set(map_info->vsi_map,
4321 *vsi_list_id = map_info->vsi_list_id;
4331 * ice_add_rule_internal - add rule for a given lookup type
4332 * @hw: pointer to the hardware structure
4333 * @recp_list: recipe list for which rule has to be added
4334 * @lport: logic port number on which function add rule
4335 * @f_entry: structure containing MAC forwarding information
4337 * Adds or updates the rule lists for a given recipe
4339 static enum ice_status
4340 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4341 u8 lport, struct ice_fltr_list_entry *f_entry)
4343 struct ice_fltr_info *new_fltr, *cur_fltr;
4344 struct ice_fltr_mgmt_list_entry *m_entry;
4345 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4346 enum ice_status status = ICE_SUCCESS;
4348 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4349 return ICE_ERR_PARAM;
4351 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4352 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4353 f_entry->fltr_info.fwd_id.hw_vsi_id =
4354 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4356 rule_lock = &recp_list->filt_rule_lock;
4358 ice_acquire_lock(rule_lock);
4359 new_fltr = &f_entry->fltr_info;
/* Rule source: the logical port for Rx rules, the hw VSI for Tx rules. */
4360 if (new_fltr->flag & ICE_FLTR_RX)
4361 new_fltr->src = lport;
4362 else if (new_fltr->flag & ICE_FLTR_TX)
4364 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule with this lookup data: create a fresh one.
 * Otherwise fold the new VSI into the existing rule's VSI list.
 */
4366 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4368 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4369 goto exit_add_rule_internal;
4372 cur_fltr = &m_entry->fltr_info;
4373 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4375 exit_add_rule_internal:
4376 ice_release_lock(rule_lock);
4381 * ice_remove_vsi_list_rule
4382 * @hw: pointer to the hardware structure
4383 * @vsi_list_id: VSI list ID generated as part of allocate resource
4384 * @lkup_type: switch rule filter lookup type
4386 * The VSI list should be emptied before this function is called to remove the
4389 static enum ice_status
4390 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4391 enum ice_sw_lkup_type lkup_type)
4393 /* Free the vsi_list resource that we allocated. It is assumed that the
4394 * list is empty at this point.
4396 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4397 ice_aqc_opc_free_res);
4401 * ice_rem_update_vsi_list
4402 * @hw: pointer to the hardware structure
4403 * @vsi_handle: VSI handle of the VSI to remove
4404 * @fm_list: filter management entry for which the VSI list management needs to
4407 static enum ice_status
4408 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4409 struct ice_fltr_mgmt_list_entry *fm_list)
4411 enum ice_sw_lkup_type lkup_type;
4412 enum ice_status status = ICE_SUCCESS;
/* Only entries currently forwarding to a non-empty VSI list qualify. */
4415 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4416 fm_list->vsi_count == 0)
4417 return ICE_ERR_PARAM;
4419 /* A rule with the VSI being removed does not exist */
4420 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4421 return ICE_ERR_DOES_NOT_EXIST;
4423 lkup_type = fm_list->fltr_info.lkup_type;
4424 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Tell firmware to drop this VSI from the list (remove = true). */
4425 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4426 ice_aqc_opc_update_sw_rules,
4431 fm_list->vsi_count--;
4432 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With one VSI left (non-VLAN lookups), collapse the rule back from
 * "forward to VSI list" to a plain "forward to VSI" rule.
 */
4434 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4435 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4436 struct ice_vsi_list_map_info *vsi_list_info =
4437 fm_list->vsi_list_info;
/* The sole remaining member is the first set bit in the map. */
4440 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4442 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4443 return ICE_ERR_OUT_OF_RANGE;
4445 /* Make sure VSI list is empty before removing it below */
4446 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4448 ice_aqc_opc_update_sw_rules,
4453 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4454 tmp_fltr_info.fwd_id.hw_vsi_id =
4455 ice_get_hw_vsi_num(hw, rem_vsi_handle);
4456 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4457 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4459 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4460 tmp_fltr_info.fwd_id.hw_vsi_id, status);
4464 fm_list->fltr_info = tmp_fltr_info;
/* Free the now-unused VSI list. VLAN (prune) lists are kept until they
 * are completely empty, hence the different vsi_count threshold.
 */
4467 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4468 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4469 struct ice_vsi_list_map_info *vsi_list_info =
4470 fm_list->vsi_list_info;
4472 /* Remove the VSI list since it is no longer used */
4473 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4475 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4476 vsi_list_id, status);
4480 LIST_DEL(&vsi_list_info->list_entry);
4481 ice_free(hw, vsi_list_info);
4482 fm_list->vsi_list_info = NULL;
4489 * ice_remove_rule_internal - Remove a filter rule of a given type
4491 * @hw: pointer to the hardware structure
4492 * @recp_list: recipe list for which the rule needs to removed
4493 * @f_entry: rule entry containing filter information
4495 static enum ice_status
4496 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4497 struct ice_fltr_list_entry *f_entry)
4499 struct ice_fltr_mgmt_list_entry *list_elem;
4500 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4501 enum ice_status status = ICE_SUCCESS;
4502 bool remove_rule = false;
4505 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4506 return ICE_ERR_PARAM;
4507 f_entry->fltr_info.fwd_id.hw_vsi_id =
4508 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4510 rule_lock = &recp_list->filt_rule_lock;
4511 ice_acquire_lock(rule_lock);
4512 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4513 &f_entry->fltr_info);
4515 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, based on whether
 * the rule forwards to a single VSI or to a (possibly shared) VSI list.
 */
4519 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4521 } else if (!list_elem->vsi_list_info) {
4522 status = ICE_ERR_DOES_NOT_EXIST;
4524 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4525 /* a ref_cnt > 1 indicates that the vsi_list is being
4526 * shared by multiple rules. Decrement the ref_cnt and
4527 * remove this rule, but do not modify the list, as it
4528 * is in-use by other rules.
4530 list_elem->vsi_list_info->ref_cnt--;
4533 /* a ref_cnt of 1 indicates the vsi_list is only used
4534 * by one rule. However, the original removal request is only
4535 * for a single VSI. Update the vsi_list first, and only
4536 * remove the rule if there are no further VSIs in this list.
4538 vsi_handle = f_entry->fltr_info.vsi_handle;
4539 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4542 /* if VSI count goes to zero after updating the VSI list */
4543 if (list_elem->vsi_count == 0)
4548 /* Remove the lookup rule */
4549 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs no dummy packet header, hence the NO_HDR size. */
4551 s_rule = (struct ice_aqc_sw_rules_elem *)
4552 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4554 status = ICE_ERR_NO_MEMORY;
4558 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4559 ice_aqc_opc_remove_sw_rules);
4561 status = ice_aq_sw_rules(hw, s_rule,
4562 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4563 ice_aqc_opc_remove_sw_rules, NULL);
4565 /* Remove a book keeping from the list */
4566 ice_free(hw, s_rule);
4571 LIST_DEL(&list_elem->list_entry);
4572 ice_free(hw, list_elem);
4575 ice_release_lock(rule_lock);
4580 * ice_aq_get_res_alloc - get allocated resources
4581 * @hw: pointer to the HW struct
4582 * @num_entries: pointer to u16 to store the number of resource entries returned
4583 * @buf: pointer to buffer
4584 * @buf_size: size of buf
4585 * @cd: pointer to command details structure or NULL
4587 * The caller-supplied buffer must be large enough to store the resource
4588 * information for all resource types. Each resource type is an
4589 * ice_aqc_get_res_resp_elem structure.
4592 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4593 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4594 struct ice_sq_cd *cd)
4596 struct ice_aqc_get_res_alloc *resp;
4597 enum ice_status status;
4598 struct ice_aq_desc desc;
4601 return ICE_ERR_BAD_PTR;
/* Buffer must be able to hold one response element per resource type. */
4603 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4604 return ICE_ERR_INVAL_SIZE;
4606 resp = &desc.params.get_res;
4608 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4609 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional: only written back on success when provided. */
4611 if (!status && num_entries)
4612 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4618 * ice_aq_get_res_descs - get allocated resource descriptors
4619 * @hw: pointer to the hardware structure
4620 * @num_entries: number of resource entries in buffer
4621 * @buf: structure to hold response data buffer
4622 * @buf_size: size of buffer
4623 * @res_type: resource type
4624 * @res_shared: is resource shared
4625 * @desc_id: input - first desc ID to start; output - next desc ID
4626 * @cd: pointer to command details structure or NULL
4629 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4630 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4631 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4633 struct ice_aqc_get_allocd_res_desc *cmd;
4634 struct ice_aq_desc desc;
4635 enum ice_status status;
4637 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4639 cmd = &desc.params.get_res_desc;
/* NOTE(review): the guard condition for this early return is not visible in
 * this extract (presumably a NULL-buf check) — confirm against full source.
 */
4642 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements. */
4644 if (buf_size != (num_entries * sizeof(*buf)))
4645 return ICE_ERR_PARAM;
4647 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag in one LE16 field. */
4649 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4650 ICE_AQC_RES_TYPE_M) | (res_shared ?
4651 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4652 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4654 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand the continuation cursor back to the caller for paging.
 * NOTE(review): whether this write is conditioned on !status is not visible
 * in this extract — confirm against full source.
 */
4656 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4662 * ice_add_mac_rule - Add a MAC address based filter rule
4663 * @hw: pointer to the hardware structure
4664 * @m_list: list of MAC addresses and forwarding information
4665 * @sw: pointer to switch info struct for which function add rule
4666 * @lport: logic port number on which function add rule
4668 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4669 * multiple unicast addresses, the function assumes that all the
4670 * addresses are unique in a given add_mac call. It doesn't
4671 * check for duplicates in this case, removing duplicates from a given
4672 * list should be taken care of in the caller of this function.
4674 static enum ice_status
4675 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4676 struct ice_switch_info *sw, u8 lport)
4678 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4679 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4680 struct ice_fltr_list_entry *m_list_itr;
4681 struct LIST_HEAD_TYPE *rule_head;
4682 u16 total_elem_left, s_rule_size;
4683 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4684 enum ice_status status = ICE_SUCCESS;
4685 u16 num_unicast = 0;
4689 rule_lock = &recp_list->filt_rule_lock;
4690 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry and route multicast / shared-unicast entries
 * through ice_add_rule_internal(); exclusive unicast entries are only
 * checked for pre-existence here and added in bulk below.
 */
4692 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4694 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4698 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4699 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4700 if (!ice_is_vsi_valid(hw, vsi_handle))
4701 return ICE_ERR_PARAM;
4702 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4703 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4704 /* update the src in case it is VSI num */
4705 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4706 return ICE_ERR_PARAM;
4707 m_list_itr->fltr_info.src = hw_vsi_id;
4708 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4709 IS_ZERO_ETHER_ADDR(add))
4710 return ICE_ERR_PARAM;
4711 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4712 /* Don't overwrite the unicast address */
4713 ice_acquire_lock(rule_lock);
4714 if (ice_find_rule_entry(rule_head,
4715 &m_list_itr->fltr_info)) {
4716 ice_release_lock(rule_lock);
4717 return ICE_ERR_ALREADY_EXISTS;
4719 ice_release_lock(rule_lock);
4721 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4722 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4723 m_list_itr->status =
4724 ice_add_rule_internal(hw, recp_list, lport,
4726 if (m_list_itr->status)
4727 return m_list_itr->status;
/* Lock is held from here until ice_add_mac_exit (released at "4811"). */
4731 ice_acquire_lock(rule_lock);
4732 /* Exit if no suitable entries were found for adding bulk switch rule */
4734 status = ICE_SUCCESS;
4735 goto ice_add_mac_exit;
4738 /* Allocate switch rule buffer for the bulk update for unicast */
4739 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4740 s_rule = (struct ice_aqc_sw_rules_elem *)
4741 ice_calloc(hw, num_unicast, s_rule_size);
4743 status = ICE_ERR_NO_MEMORY;
4744 goto ice_add_mac_exit;
/* Pass 2: serialize each exclusive-unicast filter into the bulk buffer. */
4748 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4750 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4751 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4753 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4754 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4755 ice_aqc_opc_add_sw_rules);
4756 r_iter = (struct ice_aqc_sw_rules_elem *)
4757 ((u8 *)r_iter + s_rule_size);
4761 /* Call AQ bulk switch rule update for all unicast addresses */
4763 /* Call AQ switch rule in AQ_MAX chunk */
4764 for (total_elem_left = num_unicast; total_elem_left > 0;
4765 total_elem_left -= elem_sent) {
4766 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is capped by how many rules fit in one AQ buffer. */
4768 elem_sent = MIN_T(u8, total_elem_left,
4769 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4770 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4771 elem_sent, ice_aqc_opc_add_sw_rules,
4774 goto ice_add_mac_exit;
4775 r_iter = (struct ice_aqc_sw_rules_elem *)
4776 ((u8 *)r_iter + (elem_sent * s_rule_size));
4779 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: record FW-assigned rule IDs and create book-keeping entries for
 * each unicast filter, walking r_iter in step with the list.
 */
4781 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4783 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4784 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4785 struct ice_fltr_mgmt_list_entry *fm_entry;
4787 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4788 f_info->fltr_rule_id =
4789 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4790 f_info->fltr_act = ICE_FWD_TO_VSI;
4791 /* Create an entry to track this MAC address */
4792 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4793 ice_malloc(hw, sizeof(*fm_entry));
4795 status = ICE_ERR_NO_MEMORY;
4796 goto ice_add_mac_exit;
4798 fm_entry->fltr_info = *f_info;
4799 fm_entry->vsi_count = 1;
4800 /* The book keeping entries will get removed when
4801 * base driver calls remove filter AQ command
4804 LIST_ADD(&fm_entry->list_entry, rule_head);
4805 r_iter = (struct ice_aqc_sw_rules_elem *)
4806 ((u8 *)r_iter + s_rule_size);
/* Common exit: drop the rule-list lock and free the bulk buffer. */
4811 ice_release_lock(rule_lock);
4813 ice_free(hw, s_rule);
4818 * ice_add_mac - Add a MAC address based filter rule
4819 * @hw: pointer to the hardware structure
4820 * @m_list: list of MAC addresses and forwarding information
4822 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: validates arguments and delegates to
 * ice_add_mac_rule() using hw's switch_info and the port's logical number.
 */
4824 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
/* NOTE(review): the guard condition for this early return is not visible in
 * this extract (presumably NULL checks on hw/m_list) — confirm.
 */
4827 return ICE_ERR_PARAM;
4829 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4830 hw->port_info->lport);
4834 * ice_add_vlan_internal - Add one VLAN based filter rule
4835 * @hw: pointer to the hardware structure
4836 * @recp_list: recipe list for which rule has to be added
4837 * @f_entry: filter entry containing one VLAN information
4839 static enum ice_status
4840 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4841 struct ice_fltr_list_entry *f_entry)
4843 struct ice_fltr_mgmt_list_entry *v_list_itr;
4844 struct ice_fltr_info *new_fltr, *cur_fltr;
4845 enum ice_sw_lkup_type lkup_type;
4846 u16 vsi_list_id = 0, vsi_handle;
4847 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4848 enum ice_status status = ICE_SUCCESS;
4850 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4851 return ICE_ERR_PARAM;
4853 f_entry->fltr_info.fwd_id.hw_vsi_id =
4854 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4855 new_fltr = &f_entry->fltr_info;
4857 /* VLAN ID should only be 12 bits */
4858 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4859 return ICE_ERR_PARAM;
4861 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4862 return ICE_ERR_PARAM;
4864 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4865 lkup_type = new_fltr->lkup_type;
4866 vsi_handle = new_fltr->vsi_handle;
4867 rule_lock = &recp_list->filt_rule_lock;
4868 ice_acquire_lock(rule_lock);
/* Case 1: no rule yet exists for this VLAN — create one, reusing an
 * existing VSI list if a matching one is found.
 */
4869 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4871 struct ice_vsi_list_map_info *map_info = NULL;
4873 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4874 /* All VLAN pruning rules use a VSI list. Check if
4875 * there is already a VSI list containing VSI that we
4876 * want to add. If found, use the same vsi_list_id for
4877 * this new VLAN rule or else create a new list.
4879 map_info = ice_find_vsi_list_entry(recp_list,
4883 status = ice_create_vsi_list_rule(hw,
4891 /* Convert the action to forwarding to a VSI list. */
4892 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4893 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4896 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4898 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4901 status = ICE_ERR_DOES_NOT_EXIST;
4904 /* reuse VSI list for new rule and increment ref_cnt */
4906 v_list_itr->vsi_list_info = map_info;
4907 map_info->ref_cnt++;
4909 v_list_itr->vsi_list_info =
4910 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and its VSI list is referenced only once — extend
 * that list in place with the new VSI.
 */
4914 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4915 /* Update existing VSI list to add new VSI ID only if it used
4918 cur_fltr = &v_list_itr->fltr_info;
4919 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
/* Case 3: rule exists but its VSI list is shared — build a fresh two-VSI
 * list and repoint this rule at it.
 */
4922 /* If VLAN rule exists and VSI list being used by this rule is
4923 * referenced by more than 1 VLAN rule. Then create a new VSI
4924 * list appending previous VSI with new VSI and update existing
4925 * VLAN rule to point to new VSI list ID
4927 struct ice_fltr_info tmp_fltr;
4928 u16 vsi_handle_arr[2];
4931 /* Current implementation only supports reusing VSI list with
4932 * one VSI count. We should never hit below condition
4934 if (v_list_itr->vsi_count > 1 &&
4935 v_list_itr->vsi_list_info->ref_cnt > 1) {
4936 ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4937 status = ICE_ERR_CFG;
4942 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4945 /* A rule already exists with the new VSI being added */
4946 if (cur_handle == vsi_handle) {
4947 status = ICE_ERR_ALREADY_EXISTS;
4951 vsi_handle_arr[0] = cur_handle;
4952 vsi_handle_arr[1] = vsi_handle;
4953 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4954 &vsi_list_id, lkup_type);
4958 tmp_fltr = v_list_itr->fltr_info;
4959 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4960 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4961 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4962 /* Update the previous switch rule to a new VSI list which
4963 * includes current VSI that is requested
4965 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4969 /* before overriding VSI list map info. decrement ref_cnt of
4972 v_list_itr->vsi_list_info->ref_cnt--;
4974 /* now update to newly created list */
4975 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4976 v_list_itr->vsi_list_info =
4977 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4979 v_list_itr->vsi_count++;
4983 ice_release_lock(rule_lock);
4988 * ice_add_vlan_rule - Add VLAN based filter rule
4989 * @hw: pointer to the hardware structure
4990 * @v_list: list of VLAN entries and forwarding information
4991 * @sw: pointer to switch info struct for which function add rule
4993 static enum ice_status
4994 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4995 struct ice_switch_info *sw)
4997 struct ice_fltr_list_entry *v_list_itr;
4998 struct ice_sw_recipe *recp_list;
5000 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
/* Add each VLAN entry individually; stop at the first failure. Note that
 * on failure earlier entries already added are NOT rolled back here.
 */
5001 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5003 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5004 return ICE_ERR_PARAM;
5005 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5006 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5008 if (v_list_itr->status)
5009 return v_list_itr->status;
5015 * ice_add_vlan - Add a VLAN based filter rule
5016 * @hw: pointer to the hardware structure
5017 * @v_list: list of VLAN and forwarding information
5019 * Function add VLAN rule for logical port from HW struct
/* Thin public wrapper around ice_add_vlan_rule() using hw's switch_info. */
5021 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): guard condition for this early return is elided in this
 * extract (presumably NULL checks on hw/v_list) — confirm.
 */
5024 return ICE_ERR_PARAM;
5026 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5030 * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5031 * @hw: pointer to the hardware structure
5032 * @mv_list: list of MAC and VLAN filters
5033 * @sw: pointer to switch info struct for which function add rule
5034 * @lport: logic port number on which function add rule
5036 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5037 * pruning bits enabled, then it is the responsibility of the caller to make
5038 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5039 * VLAN won't be received on that VSI otherwise.
5041 static enum ice_status
5042 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5043 struct ice_switch_info *sw, u8 lport)
5045 struct ice_fltr_list_entry *mv_list_itr;
5046 struct ice_sw_recipe *recp_list;
5048 if (!mv_list || !hw)
5049 return ICE_ERR_PARAM;
5051 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
/* Add each MAC+VLAN pair via the generic rule path; first failure aborts
 * the loop (no rollback of entries already added).
 */
5052 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5054 enum ice_sw_lkup_type l_type =
5055 mv_list_itr->fltr_info.lkup_type;
5057 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5058 return ICE_ERR_PARAM;
5059 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5060 mv_list_itr->status =
5061 ice_add_rule_internal(hw, recp_list, lport,
5063 if (mv_list_itr->status)
5064 return mv_list_itr->status;
5070 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5071 * @hw: pointer to the hardware structure
5072 * @mv_list: list of MAC VLAN addresses and forwarding information
5074 * Function add MAC VLAN rule for logical port from HW struct
/* Thin public wrapper around ice_add_mac_vlan_rule() for this port. */
5077 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5079 if (!mv_list || !hw)
5080 return ICE_ERR_PARAM;
5082 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5083 hw->port_info->lport);
5087 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5088 * @hw: pointer to the hardware structure
5089 * @em_list: list of ether type MAC filter, MAC is optional
5090 * @sw: pointer to switch info struct for which function add rule
5091 * @lport: logic port number on which function add rule
5093 * This function requires the caller to populate the entries in
5094 * the filter list with the necessary fields (including flags to
5095 * indicate Tx or Rx rules).
5097 static enum ice_status
5098 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5099 struct ice_switch_info *sw, u8 lport)
5101 struct ice_fltr_list_entry *em_list_itr;
/* Each entry selects its own recipe list (ETHERTYPE or ETHERTYPE_MAC)
 * from its lookup type; anything else is rejected.
 */
5103 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5105 struct ice_sw_recipe *recp_list;
5106 enum ice_sw_lkup_type l_type;
5108 l_type = em_list_itr->fltr_info.lkup_type;
5109 recp_list = &sw->recp_list[l_type];
5111 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5112 l_type != ICE_SW_LKUP_ETHERTYPE)
5113 return ICE_ERR_PARAM;
5115 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5118 if (em_list_itr->status)
5119 return em_list_itr->status;
5125 * ice_add_eth_mac - Add a ethertype based filter rule
5126 * @hw: pointer to the hardware structure
5127 * @em_list: list of ethertype and forwarding information
5129 * Function add ethertype rule for logical port from HW struct
/* Thin public wrapper around ice_add_eth_mac_rule() for this port. */
5132 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5134 if (!em_list || !hw)
5135 return ICE_ERR_PARAM;
5137 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5138 hw->port_info->lport);
5142 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5143 * @hw: pointer to the hardware structure
5144 * @em_list: list of ethertype or ethertype MAC entries
5145 * @sw: pointer to switch info struct for which function add rule
5147 static enum ice_status
5148 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5149 struct ice_switch_info *sw)
5151 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* _SAFE iteration: ice_remove_rule_internal() may unlink the current
 * entry's book-keeping, so keep a lookahead pointer.
 */
5153 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5155 struct ice_sw_recipe *recp_list;
5156 enum ice_sw_lkup_type l_type;
5158 l_type = em_list_itr->fltr_info.lkup_type;
5160 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5161 l_type != ICE_SW_LKUP_ETHERTYPE)
5162 return ICE_ERR_PARAM;
5164 recp_list = &sw->recp_list[l_type];
5165 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5167 if (em_list_itr->status)
5168 return em_list_itr->status;
5174 * ice_remove_eth_mac - remove a ethertype based filter rule
5175 * @hw: pointer to the hardware structure
5176 * @em_list: list of ethertype and forwarding information
/* Thin public wrapper around ice_remove_eth_mac_rule(). */
5180 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5182 if (!em_list || !hw)
5183 return ICE_ERR_PARAM;
5185 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5189 * ice_rem_sw_rule_info
5190 * @hw: pointer to the hardware structure
5191 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every book-keeping entry on a (non-advanced) filter rule list.
 * This only releases host-side tracking memory; it does not touch HW rules.
 */
5194 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5196 if (!LIST_EMPTY(rule_head)) {
5197 struct ice_fltr_mgmt_list_entry *entry;
5198 struct ice_fltr_mgmt_list_entry *tmp;
5200 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5201 ice_fltr_mgmt_list_entry, list_entry) {
5202 LIST_DEL(&entry->list_entry);
5203 ice_free(hw, entry);
5209 * ice_rem_adv_rule_info
5210 * @hw: pointer to the hardware structure
5211 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every advanced-rule book-keeping entry on the list, including each
 * entry's separately allocated lookup array (lkups).
 */
5214 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5216 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5217 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5219 if (LIST_EMPTY(rule_head))
5222 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5223 ice_adv_fltr_mgmt_list_entry, list_entry) {
5224 LIST_DEL(&lst_itr->list_entry);
/* Free the lookup array before the entry that owns it. */
5225 ice_free(hw, lst_itr->lkups);
5226 ice_free(hw, lst_itr);
5231 * ice_rem_all_sw_rules_info
5232 * @hw: pointer to the hardware structure
/* Walks every recipe and releases its rule book-keeping, dispatching to the
 * plain or advanced cleanup helper based on the recipe's adv_rule flag.
 */
5234 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5236 struct ice_switch_info *sw = hw->switch_info;
5239 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5240 struct LIST_HEAD_TYPE *rule_head;
5242 rule_head = &sw->recp_list[i].filt_rules;
5243 if (!sw->recp_list[i].adv_rule)
5244 ice_rem_sw_rule_info(hw, rule_head);
5246 ice_rem_adv_rule_info(hw, rule_head);
/* Once an advanced recipe's list is empty, clear the flag so the
 * slot can be treated as a plain recipe again.
 */
5247 if (sw->recp_list[i].adv_rule &&
5248 LIST_EMPTY(&sw->recp_list[i].filt_rules))
5249 sw->recp_list[i].adv_rule = false;
5254 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5255 * @pi: pointer to the port_info structure
5256 * @vsi_handle: VSI handle to set as default
5257 * @set: true to add the above mentioned switch rule, false to remove it
5258 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5260 * add filter rule to set/unset given VSI as default VSI for the switch
5261 * (represented by swid)
5264 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5267 struct ice_aqc_sw_rules_elem *s_rule;
5268 struct ice_fltr_info f_info;
5269 struct ice_hw *hw = pi->hw;
5270 enum ice_adminq_opc opcode;
5271 enum ice_status status;
5275 if (!ice_is_vsi_valid(hw, vsi_handle))
5276 return ICE_ERR_PARAM;
5277 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add needs the full dummy-ethernet-header rule; remove only needs the
 * headerless rule size.
 */
5279 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5280 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5282 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5284 return ICE_ERR_NO_MEMORY;
5286 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5288 f_info.lkup_type = ICE_SW_LKUP_DFLT;
5289 f_info.flag = direction;
5290 f_info.fltr_act = ICE_FWD_TO_VSI;
5291 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced by the physical port; Tx default rules by
 * the VSI itself. For removal, reuse the rule ID cached in port_info.
 */
5293 if (f_info.flag & ICE_FLTR_RX) {
5294 f_info.src = pi->lport;
5295 f_info.src_id = ICE_SRC_ID_LPORT;
5297 f_info.fltr_rule_id =
5298 pi->dflt_rx_vsi_rule_id;
5299 } else if (f_info.flag & ICE_FLTR_TX) {
5300 f_info.src_id = ICE_SRC_ID_VSI;
5301 f_info.src = hw_vsi_id;
5303 f_info.fltr_rule_id =
5304 pi->dflt_tx_vsi_rule_id;
5308 opcode = ice_aqc_opc_add_sw_rules;
5310 opcode = ice_aqc_opc_remove_sw_rules;
5312 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5314 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5315 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache (set) or invalidate (clear) the default-VSI number and
 * rule ID in port_info for later removal/lookup.
 */
5318 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5320 if (f_info.flag & ICE_FLTR_TX) {
5321 pi->dflt_tx_vsi_num = hw_vsi_id;
5322 pi->dflt_tx_vsi_rule_id = index;
5323 } else if (f_info.flag & ICE_FLTR_RX) {
5324 pi->dflt_rx_vsi_num = hw_vsi_id;
5325 pi->dflt_rx_vsi_rule_id = index;
5328 if (f_info.flag & ICE_FLTR_TX) {
5329 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5330 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5331 } else if (f_info.flag & ICE_FLTR_RX) {
5332 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5333 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5338 ice_free(hw, s_rule);
5343 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5344 * @list_head: head of rule list
5345 * @f_info: rule information
5347 * Helper function to search for a unicast rule entry - this is to be used
5348 * to remove unicast MAC filter that is not shared with other VSIs on the
5351 * Returns pointer to entry storing the rule if found
5353 static struct ice_fltr_mgmt_list_entry *
5354 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5355 struct ice_fltr_info *f_info)
5357 struct ice_fltr_mgmt_list_entry *list_itr;
/* Match on the full lookup data (MAC), the target HW VSI, and the Tx/Rx
 * flag — all three must agree for the entry to count as "the same rule".
 * NOTE(review): caller is expected to hold the recipe's filt_rule_lock
 * (both visible callers acquire it around this call).
 */
5359 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5361 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5362 sizeof(f_info->l_data)) &&
5363 f_info->fwd_id.hw_vsi_id ==
5364 list_itr->fltr_info.fwd_id.hw_vsi_id &&
5365 f_info->flag == list_itr->fltr_info.flag)
5372 * ice_remove_mac_rule - remove a MAC based filter rule
5373 * @hw: pointer to the hardware structure
5374 * @m_list: list of MAC addresses and forwarding information
5375 * @recp_list: list from which function remove MAC address
5377 * This function removes either a MAC filter rule or a specific VSI from a
5378 * VSI list for a multicast MAC address.
5380 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5381 * ice_add_mac. Caller should be aware that this call will only work if all
5382 * the entries passed into m_list were added previously. It will not attempt to
5383 * do a partial remove of entries that were found.
5385 static enum ice_status
5386 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5387 struct ice_sw_recipe *recp_list)
5389 struct ice_fltr_list_entry *list_itr, *tmp;
5390 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5393 return ICE_ERR_PARAM;
5395 rule_lock = &recp_list->filt_rule_lock;
/* _SAFE iteration: removal may unlink entries while we walk the list. */
5396 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5398 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5399 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5402 if (l_type != ICE_SW_LKUP_MAC)
5403 return ICE_ERR_PARAM;
5405 vsi_handle = list_itr->fltr_info.vsi_handle;
5406 if (!ice_is_vsi_valid(hw, vsi_handle))
5407 return ICE_ERR_PARAM;
5409 list_itr->fltr_info.fwd_id.hw_vsi_id =
5410 ice_get_hw_vsi_num(hw, vsi_handle);
5411 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5412 /* Don't remove the unicast address that belongs to
5413 * another VSI on the switch, since it is not being
5416 ice_acquire_lock(rule_lock);
5417 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5418 &list_itr->fltr_info)) {
5419 ice_release_lock(rule_lock);
5420 return ICE_ERR_DOES_NOT_EXIST;
5422 ice_release_lock(rule_lock);
5424 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5426 if (list_itr->status)
5427 return list_itr->status;
5433 * ice_remove_mac - remove a MAC address based filter rule
5434 * @hw: pointer to the hardware structure
5435 * @m_list: list of MAC addresses and forwarding information
/* Thin public wrapper: selects the MAC recipe list from hw's switch_info
 * and delegates to ice_remove_mac_rule().
 */
5438 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5440 struct ice_sw_recipe *recp_list;
5442 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5443 return ice_remove_mac_rule(hw, m_list, recp_list);
5447 * ice_remove_vlan_rule - Remove VLAN based filter rule
5448 * @hw: pointer to the hardware structure
5449 * @v_list: list of VLAN entries and forwarding information
5450 * @recp_list: list from which function remove VLAN
5452 static enum ice_status
5453 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5454 struct ice_sw_recipe *recp_list)
5456 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* _SAFE iteration: removal may unlink entries as we go. First failure
 * aborts the loop; entries already removed stay removed.
 */
5458 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5460 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5462 if (l_type != ICE_SW_LKUP_VLAN)
5463 return ICE_ERR_PARAM;
5464 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5466 if (v_list_itr->status)
5467 return v_list_itr->status;
5473 * ice_remove_vlan - remove a VLAN address based filter rule
5474 * @hw: pointer to the hardware structure
5475 * @v_list: list of VLAN and forwarding information
/* Thin public wrapper around ice_remove_vlan_rule(). */
5479 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5481 struct ice_sw_recipe *recp_list;
/* NOTE(review): guard condition for this early return is elided in this
 * extract (presumably NULL checks on hw/v_list) — confirm.
 */
5484 return ICE_ERR_PARAM;
5486 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5487 return ice_remove_vlan_rule(hw, v_list, recp_list);
5491 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5492 * @hw: pointer to the hardware structure
5493 * @v_list: list of MAC VLAN entries and forwarding information
5494 * @recp_list: list from which function remove MAC VLAN
5496 static enum ice_status
5497 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5498 struct ice_sw_recipe *recp_list)
5500 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* NOTE(review): the recp_list parameter is unconditionally overwritten
 * here, making the passed-in value dead. Harmless since the sole visible
 * caller passes the same MAC_VLAN list, but worth cleaning up upstream.
 */
5502 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5503 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5505 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5507 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5508 return ICE_ERR_PARAM;
5509 v_list_itr->status =
5510 ice_remove_rule_internal(hw, recp_list,
5512 if (v_list_itr->status)
5513 return v_list_itr->status;
5519 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5520 * @hw: pointer to the hardware structure
5521 * @mv_list: list of MAC VLAN and forwarding information
/* Thin public wrapper around ice_remove_mac_vlan_rule(). */
5524 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5526 struct ice_sw_recipe *recp_list;
5528 if (!mv_list || !hw)
5529 return ICE_ERR_PARAM;
5531 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5532 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5536 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5537 * @fm_entry: filter entry to inspect
5538 * @vsi_handle: VSI handle to compare with filter info
/* True when the filter forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap contains this VSI handle.
 */
5541 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5543 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5544 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5545 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5546 fm_entry->vsi_list_info &&
5547 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5552 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5553 * @hw: pointer to the hardware structure
5554 * @vsi_handle: VSI handle to remove filters from
5555 * @vsi_list_head: pointer to the list to add entry to
5556 * @fi: pointer to fltr_info of filter entry to copy & add
5558 * Helper function, used when creating a list of filters to remove from
5559 * a specific VSI. The entry added to vsi_list_head is a COPY of the
5560 * original filter entry, with the exception of fltr_info.fltr_act and
5561 * fltr_info.fwd_id fields. These are set such that later logic can
5562 * extract which VSI to remove the fltr from, and pass on that information.
5564 static enum ice_status
5565 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5566 struct LIST_HEAD_TYPE *vsi_list_head,
5567 struct ice_fltr_info *fi)
5569 struct ice_fltr_list_entry *tmp;
5571 /* this memory is freed up in the caller function
5572 * once filters for this VSI are removed
5574 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5576 return ICE_ERR_NO_MEMORY;
5578 tmp->fltr_info = *fi;
5580 /* Overwrite these fields to indicate which VSI to remove filter from,
5581 * so find and remove logic can extract the information from the
5582 * list entries. Note that original entries will still have proper
5585 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5586 tmp->fltr_info.vsi_handle = vsi_handle;
5587 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5589 LIST_ADD(&tmp->list_entry, vsi_list_head);
5595 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5596 * @hw: pointer to the hardware structure
5597 * @vsi_handle: VSI handle to remove filters from
5598 * @lkup_list_head: pointer to the list that has certain lookup type filters
5599 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5601 * Locates all filters in lkup_list_head that are used by the given VSI,
5602 * and adds COPIES of those entries to vsi_list_head (intended to be used
5603 * to remove the listed filters).
5604 * Note that this means all entries in vsi_list_head must be explicitly
5605 * deallocated by the caller when done with list.
5607 static enum ice_status
5608 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5609 struct LIST_HEAD_TYPE *lkup_list_head,
5610 struct LIST_HEAD_TYPE *vsi_list_head)
5612 struct ice_fltr_mgmt_list_entry *fm_entry;
5613 enum ice_status status = ICE_SUCCESS;
5615 /* check to make sure VSI ID is valid and within boundary */
5616 if (!ice_is_vsi_valid(hw, vsi_handle))
5617 return ICE_ERR_PARAM;
/* Copy every filter this VSI uses onto vsi_list_head; entries not used by
 * the VSI are skipped.
 */
5619 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5620 ice_fltr_mgmt_list_entry, list_entry) {
5621 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5624 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5626 &fm_entry->fltr_info);
5634 * ice_determine_promisc_mask
5635 * @fi: filter info to parse
5637 * Helper function to determine which ICE_PROMISC_ mask corresponds
5638 * to given filter into.
5640 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5642 u16 vid = fi->l_data.mac_vlan.vlan_id;
5643 u8 *macaddr = fi->l_data.mac.mac_addr;
5644 bool is_tx_fltr = false;
5645 u8 promisc_mask = 0;
/* Direction (Tx vs Rx) selects which half of each promisc flag pair is
 * set; the MAC class (broadcast/multicast/unicast) selects the pair.
 */
5647 if (fi->flag == ICE_FLTR_TX)
5650 if (IS_BROADCAST_ETHER_ADDR(macaddr))
5651 promisc_mask |= is_tx_fltr ?
5652 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5653 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5654 promisc_mask |= is_tx_fltr ?
5655 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5656 else if (IS_UNICAST_ETHER_ADDR(macaddr))
5657 promisc_mask |= is_tx_fltr ?
5658 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* NOTE(review): the condition guarding the VLAN-promisc flags below (which
 * reads `vid`) is elided in this extract — confirm against full source.
 */
5660 promisc_mask |= is_tx_fltr ?
5661 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5663 return promisc_mask;
5667 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5668 * @hw: pointer to the hardware structure
5669 * @vsi_handle: VSI handle to retrieve info from
5670 * @promisc_mask: pointer to mask to be filled in
5671 * @vid: VLAN ID of promisc VLAN VSI
5672 * @sw: pointer to switch info struct for which function add rule
5674 static enum ice_status
5675 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5676 u16 *vid, struct ice_switch_info *sw)
5678 struct ice_fltr_mgmt_list_entry *itr;
5679 struct LIST_HEAD_TYPE *rule_head;
5680 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5682 if (!ice_is_vsi_valid(hw, vsi_handle))
5683 return ICE_ERR_PARAM;
/* Aggregate promisc flags by scanning the PROMISC recipe's rule list for
 * filters that apply to this VSI, holding the list lock throughout.
 */
5687 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5688 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5690 ice_acquire_lock(rule_lock);
5691 LIST_FOR_EACH_ENTRY(itr, rule_head,
5692 ice_fltr_mgmt_list_entry, list_entry) {
5693 /* Continue if this filter doesn't apply to this VSI or the
5694 * VSI ID is not in the VSI map for this filter
5696 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5699 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5701 ice_release_lock(rule_lock);
5707 * ice_get_vsi_promisc - get promiscuous mode of given VSI
5708 * @hw: pointer to the hardware structure
5709 * @vsi_handle: VSI handle to retrieve info from
5710 * @promisc_mask: pointer to mask to be filled in
5711 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Thin public wrapper around _ice_get_vsi_promisc() using the
 * device's primary switch info (hw->switch_info).
5714 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5717 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5718 vid, hw->switch_info);
5722 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5723 * @hw: pointer to the hardware structure
5724 * @vsi_handle: VSI handle to retrieve info from
5725 * @promisc_mask: pointer to mask to be filled in
5726 * @vid: VLAN ID of promisc VLAN VSI
5727 * @sw: pointer to switch info struct for which function add rule
 *
 * Same as _ice_get_vsi_promisc() but walks the
 * ICE_SW_LKUP_PROMISC_VLAN rule list instead.
 * Returns ICE_ERR_PARAM for an invalid VSI handle.
5729 static enum ice_status
5730 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5731 u16 *vid, struct ice_switch_info *sw)
5733 struct ice_fltr_mgmt_list_entry *itr;
5734 struct LIST_HEAD_TYPE *rule_head;
5735 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5737 if (!ice_is_vsi_valid(hw, vsi_handle))
5738 return ICE_ERR_PARAM;
5742 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5743 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5745 ice_acquire_lock(rule_lock);
5746 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5748 /* Continue if this filter doesn't apply to this VSI or the
5749 * VSI ID is not in the VSI map for this filter
5751 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5754 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5756 ice_release_lock(rule_lock);
5762 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5763 * @hw: pointer to the hardware structure
5764 * @vsi_handle: VSI handle to retrieve info from
5765 * @promisc_mask: pointer to mask to be filled in
5766 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Thin public wrapper around _ice_get_vsi_vlan_promisc() using the
 * device's primary switch info (hw->switch_info).
5769 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5772 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5773 vid, hw->switch_info);
5777 * ice_remove_promisc - Remove promisc based filter rules
5778 * @hw: pointer to the hardware structure
5779 * @recp_id: recipe ID for which the rule needs to be removed
5780 * @v_list: list of promisc entries
 *
 * Removes each entry of @v_list through ice_remove_rule_internal(),
 * recording the per-entry status; stops and returns on the first
 * failure.
5782 static enum ice_status
5783 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5784 struct LIST_HEAD_TYPE *v_list)
5786 struct ice_fltr_list_entry *v_list_itr, *tmp;
5787 struct ice_sw_recipe *recp_list;
5789 recp_list = &hw->switch_info->recp_list[recp_id];
5790 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5792 v_list_itr->status =
5793 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5794 if (v_list_itr->status)
5795 return v_list_itr->status;
5801 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5802 * @hw: pointer to the hardware structure
5803 * @vsi_handle: VSI handle to clear mode
5804 * @promisc_mask: mask of promiscuous config bits to clear
5805 * @vid: VLAN ID to clear VLAN promiscuous
5806 * @sw: pointer to switch info struct for which function add rule
 *
 * Collects all promisc rules on this VSI that are fully covered by
 * @promisc_mask into a temporary list, then removes them in one pass
 * and frees the temporary entries.
5808 static enum ice_status
5809 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5810 u16 vid, struct ice_switch_info *sw)
5812 struct ice_fltr_list_entry *fm_entry, *tmp;
5813 struct LIST_HEAD_TYPE remove_list_head;
5814 struct ice_fltr_mgmt_list_entry *itr;
5815 struct LIST_HEAD_TYPE *rule_head;
5816 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5817 enum ice_status status = ICE_SUCCESS;
5820 if (!ice_is_vsi_valid(hw, vsi_handle))
5821 return ICE_ERR_PARAM;
 /* VLAN promisc bits live under a different lookup/recipe */
5823 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5824 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5826 recipe_id = ICE_SW_LKUP_PROMISC;
5828 rule_head = &sw->recp_list[recipe_id].filt_rules;
5829 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5831 INIT_LIST_HEAD(&remove_list_head);
5833 ice_acquire_lock(rule_lock);
5834 LIST_FOR_EACH_ENTRY(itr, rule_head,
5835 ice_fltr_mgmt_list_entry, list_entry) {
5836 struct ice_fltr_info *fltr_info;
5837 u8 fltr_promisc_mask = 0;
5839 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5841 fltr_info = &itr->fltr_info;
 /* for VLAN promisc rules only match the requested VLAN ID */
5843 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5844 vid != fltr_info->l_data.mac_vlan.vlan_id)
5847 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5849 /* Skip if filter is not completely specified by given mask */
5850 if (fltr_promisc_mask & ~promisc_mask)
5853 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5857 ice_release_lock(rule_lock);
5858 goto free_fltr_list;
5861 ice_release_lock(rule_lock);
5863 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
 /* always free the temporary list entries, success or failure */
5866 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5867 ice_fltr_list_entry, list_entry) {
5868 LIST_DEL(&fm_entry->list_entry);
5869 ice_free(hw, fm_entry);
5876 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5877 * @hw: pointer to the hardware structure
5878 * @vsi_handle: VSI handle to clear mode
5879 * @promisc_mask: mask of promiscuous config bits to clear
5880 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Thin public wrapper around _ice_clear_vsi_promisc() using the
 * device's primary switch info (hw->switch_info).
5883 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5884 u8 promisc_mask, u16 vid)
5886 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5887 vid, hw->switch_info);
5891 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5892 * @hw: pointer to the hardware structure
5893 * @vsi_handle: VSI handle to configure
5894 * @promisc_mask: mask of promiscuous config bits
5895 * @vid: VLAN ID to set VLAN promiscuous
5896 * @lport: logical port number to configure promisc mode
5897 * @sw: pointer to switch info struct for which function add rule
 *
 * Installs one forwarding rule per direction/packet-type bit present
 * in @promisc_mask; @promisc_mask is consumed bit by bit in the loop
 * below. Returns ICE_ERR_PARAM for an invalid VSI handle, or the
 * status of the first failing rule add.
5899 static enum ice_status
5900 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5901 u16 vid, u8 lport, struct ice_switch_info *sw)
5903 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5904 struct ice_fltr_list_entry f_list_entry;
5905 struct ice_fltr_info new_fltr;
5906 enum ice_status status = ICE_SUCCESS;
5912 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5914 if (!ice_is_vsi_valid(hw, vsi_handle))
5915 return ICE_ERR_PARAM;
5916 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5918 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
 /* VLAN promisc uses its own lookup/recipe and carries the VLAN ID */
5920 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5921 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5922 new_fltr.l_data.mac_vlan.vlan_id = vid;
5923 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5925 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5926 recipe_id = ICE_SW_LKUP_PROMISC;
5929 /* Separate filters must be set for each direction/packet type
5930 * combination, so we will loop over the mask value, store the
5931 * individual type, and clear it out in the input mask as it
5934 while (promisc_mask) {
5935 struct ice_sw_recipe *recp_list;
 /* pick one pending bit per iteration; priority order below */
5941 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5942 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5943 pkt_type = UCAST_FLTR;
5944 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5945 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5946 pkt_type = UCAST_FLTR;
5948 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5949 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5950 pkt_type = MCAST_FLTR;
5951 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5952 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5953 pkt_type = MCAST_FLTR;
5955 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5956 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5957 pkt_type = BCAST_FLTR;
5958 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5959 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5960 pkt_type = BCAST_FLTR;
5964 /* Check for VLAN promiscuous flag */
5965 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5966 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5967 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5968 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5972 /* Set filter DA based on packet type */
5973 mac_addr = new_fltr.l_data.mac.mac_addr;
5974 if (pkt_type == BCAST_FLTR) {
5975 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5976 } else if (pkt_type == MCAST_FLTR ||
5977 pkt_type == UCAST_FLTR) {
5978 /* Use the dummy ether header DA */
5979 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5980 ICE_NONDMA_TO_NONDMA);
5981 if (pkt_type == MCAST_FLTR)
5982 mac_addr[0] |= 0x1; /* Set multicast bit */
5985 /* Need to reset this to zero for all iterations */
 /* TX rules source from the VSI; RX rules source from the port */
5988 new_fltr.flag |= ICE_FLTR_TX;
5989 new_fltr.src = hw_vsi_id;
5991 new_fltr.flag |= ICE_FLTR_RX;
5992 new_fltr.src = lport;
5995 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5996 new_fltr.vsi_handle = vsi_handle;
5997 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5998 f_list_entry.fltr_info = new_fltr;
5999 recp_list = &sw->recp_list[recipe_id];
6001 status = ice_add_rule_internal(hw, recp_list, lport,
6003 if (status != ICE_SUCCESS)
6004 goto set_promisc_exit;
6012 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6013 * @hw: pointer to the hardware structure
6014 * @vsi_handle: VSI handle to configure
6015 * @promisc_mask: mask of promiscuous config bits
6016 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Thin public wrapper around _ice_set_vsi_promisc() using the
 * device's primary port (hw->port_info->lport) and switch info.
6019 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6022 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6023 hw->port_info->lport,
6028 * _ice_set_vlan_vsi_promisc
6029 * @hw: pointer to the hardware structure
6030 * @vsi_handle: VSI handle to configure
6031 * @promisc_mask: mask of promiscuous config bits
6032 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6033 * @lport: logical port number to configure promisc mode
6034 * @sw: pointer to switch info struct for which function add rule
6036 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Builds a snapshot of the VSI's VLAN filters, then sets or clears
 * (per @rm_vlan_promisc) promisc mode for each VLAN ID found; the
 * snapshot entries are always freed before returning.
6038 static enum ice_status
6039 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6040 bool rm_vlan_promisc, u8 lport,
6041 struct ice_switch_info *sw)
6043 struct ice_fltr_list_entry *list_itr, *tmp;
6044 struct LIST_HEAD_TYPE vsi_list_head;
6045 struct LIST_HEAD_TYPE *vlan_head;
6046 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6047 enum ice_status status;
6050 INIT_LIST_HEAD(&vsi_list_head);
6051 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6052 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
 /* snapshot VLAN filters under the lock, operate on the copy after */
6053 ice_acquire_lock(vlan_lock);
6054 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6056 ice_release_lock(vlan_lock);
6058 goto free_fltr_list;
6060 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6062 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6063 if (rm_vlan_promisc)
6064 status = _ice_clear_vsi_promisc(hw, vsi_handle,
6068 status = _ice_set_vsi_promisc(hw, vsi_handle,
6069 promisc_mask, vlan_id,
6076 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6077 ice_fltr_list_entry, list_entry) {
6078 LIST_DEL(&list_itr->list_entry);
6079 ice_free(hw, list_itr);
6085 * ice_set_vlan_vsi_promisc
6086 * @hw: pointer to the hardware structure
6087 * @vsi_handle: VSI handle to configure
6088 * @promisc_mask: mask of promiscuous config bits
6089 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6091 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Thin public wrapper around _ice_set_vlan_vsi_promisc() using the
 * device's primary port and switch info.
6094 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6095 bool rm_vlan_promisc)
6097 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6098 rm_vlan_promisc, hw->port_info->lport,
6103 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6104 * @hw: pointer to the hardware structure
6105 * @vsi_handle: VSI handle to remove filters from
6106 * @recp_list: recipe list from which function remove fltr
6107 * @lkup: switch rule filter lookup type
 *
 * Snapshots the filters of type @lkup that apply to @vsi_handle, then
 * dispatches to the type-specific remove helper and frees the snapshot.
6110 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6111 struct ice_sw_recipe *recp_list,
6112 enum ice_sw_lkup_type lkup)
6114 struct ice_fltr_list_entry *fm_entry;
6115 struct LIST_HEAD_TYPE remove_list_head;
6116 struct LIST_HEAD_TYPE *rule_head;
6117 struct ice_fltr_list_entry *tmp;
6118 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6119 enum ice_status status;
6121 INIT_LIST_HEAD(&remove_list_head);
6122 rule_lock = &recp_list[lkup].filt_rule_lock;
6123 rule_head = &recp_list[lkup].filt_rules;
6124 ice_acquire_lock(rule_lock);
6125 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6127 ice_release_lock(rule_lock);
6129 goto free_fltr_list;
 /* per-lookup-type removal; DFLT is intentionally unimplemented */
6132 case ICE_SW_LKUP_MAC:
6133 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6135 case ICE_SW_LKUP_VLAN:
6136 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6138 case ICE_SW_LKUP_PROMISC:
6139 case ICE_SW_LKUP_PROMISC_VLAN:
6140 ice_remove_promisc(hw, lkup, &remove_list_head);
6142 case ICE_SW_LKUP_MAC_VLAN:
6143 ice_remove_mac_vlan(hw, &remove_list_head);
6145 case ICE_SW_LKUP_ETHERTYPE:
6146 case ICE_SW_LKUP_ETHERTYPE_MAC:
6147 ice_remove_eth_mac(hw, &remove_list_head);
6149 case ICE_SW_LKUP_DFLT:
6150 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6152 case ICE_SW_LKUP_LAST:
6153 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6158 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6159 ice_fltr_list_entry, list_entry) {
6160 LIST_DEL(&fm_entry->list_entry);
6161 ice_free(hw, fm_entry);
6166 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6167 * @hw: pointer to the hardware structure
6168 * @vsi_handle: VSI handle to remove filters from
6169 * @sw: pointer to switch info struct
 *
 * Removes the VSI's filters for every supported lookup type, one
 * ice_remove_vsi_lkup_fltr() call per type.
6172 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6173 struct ice_switch_info *sw)
6175 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6177 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6178 sw->recp_list, ICE_SW_LKUP_MAC);
6179 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6180 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6181 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6182 sw->recp_list, ICE_SW_LKUP_PROMISC);
6183 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6184 sw->recp_list, ICE_SW_LKUP_VLAN);
6185 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6186 sw->recp_list, ICE_SW_LKUP_DFLT);
6187 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6188 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6189 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6190 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6191 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6192 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6196 * ice_remove_vsi_fltr - Remove all filters for a VSI
6197 * @hw: pointer to the hardware structure
6198 * @vsi_handle: VSI handle to remove filters from
 *
 * Public wrapper over ice_remove_vsi_fltr_rule() for the primary
 * switch info (hw->switch_info).
6200 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6202 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6206 * ice_alloc_res_cntr - allocating resource counter
6207 * @hw: pointer to the hardware structure
6208 * @type: type of resource
6209 * @alloc_shared: if set it is shared else dedicated
6210 * @num_items: number of entries requested for FD resource type
6211 * @counter_id: counter index returned by AQ call
 *
 * Allocates a counter resource via the alloc-res admin queue command
 * and returns the firmware-assigned index in @counter_id.
6214 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6217 struct ice_aqc_alloc_free_res_elem *buf;
6218 enum ice_status status;
6221 /* Allocate resource */
6222 buf_len = ice_struct_size(buf, elem, 1);
6223 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6225 return ICE_ERR_NO_MEMORY;
6227 buf->num_elems = CPU_TO_LE16(num_items);
6228 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6229 ICE_AQC_RES_TYPE_M) | alloc_shared);
6231 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6232 ice_aqc_opc_alloc_res, NULL);
 /* firmware returns the allocated index in the first element */
6236 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6244 * ice_free_res_cntr - free resource counter
6245 * @hw: pointer to the hardware structure
6246 * @type: type of resource
6247 * @alloc_shared: if set it is shared else dedicated
6248 * @num_items: number of entries to be freed for FD resource type
6249 * @counter_id: counter ID resource which needs to be freed
 *
 * Frees a previously allocated counter resource via the free-res
 * admin queue command; mirrors ice_alloc_res_cntr().
6252 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6255 struct ice_aqc_alloc_free_res_elem *buf;
6256 enum ice_status status;
6260 buf_len = ice_struct_size(buf, elem, 1);
6261 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6263 return ICE_ERR_NO_MEMORY;
6265 buf->num_elems = CPU_TO_LE16(num_items);
6266 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6267 ICE_AQC_RES_TYPE_M) | alloc_shared);
6268 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6270 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6271 ice_aqc_opc_free_res, NULL);
6273 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6280 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6281 * @hw: pointer to the hardware structure
6282 * @counter_id: returns counter index
 *
 * Convenience wrapper: one dedicated VLAN counter.
6284 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6286 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6287 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6292 * ice_free_vlan_res_counter - Free counter resource for VLAN type
6293 * @hw: pointer to the hardware structure
6294 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees one dedicated VLAN counter.
6296 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6298 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6299 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6304 * ice_alloc_res_lg_act - add large action resource
6305 * @hw: pointer to the hardware structure
6306 * @l_id: large action ID to fill it in
6307 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions and returns
 * its index in @l_id. Fails with ICE_ERR_PARAM when @num_acts is 0 or
 * exceeds ICE_MAX_LG_ACT.
6309 static enum ice_status
6310 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6312 struct ice_aqc_alloc_free_res_elem *sw_buf;
6313 enum ice_status status;
6316 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6317 return ICE_ERR_PARAM;
6319 /* Allocate resource for large action */
6320 buf_len = ice_struct_size(sw_buf, elem, 1);
6321 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6323 return ICE_ERR_NO_MEMORY;
6325 sw_buf->num_elems = CPU_TO_LE16(1);
6327 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6328 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
6329 * If num_acts is greater than 2, then use
6330 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6331 * The num_acts cannot exceed 4. This was ensured at the
6332 * beginning of the function.
6335 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6336 else if (num_acts == 2)
6337 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6339 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6341 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6342 ice_aqc_opc_alloc_res, NULL);
6344 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6346 ice_free(hw, sw_buf);
6351 * ice_add_mac_with_sw_marker - add filter with sw marker
6352 * @hw: pointer to the hardware structure
6353 * @f_info: filter info structure containing the MAC filter information
6354 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or reuses) the MAC forwarding rule, then attaches a 3-action
 * large action carrying @sw_marker. Marker and counter large actions
 * are mutually exclusive on a rule. On failure, the MAC rule is
 * removed only if this call created it.
6357 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6360 struct ice_fltr_mgmt_list_entry *m_entry;
6361 struct ice_fltr_list_entry fl_info;
6362 struct ice_sw_recipe *recp_list;
6363 struct LIST_HEAD_TYPE l_head;
6364 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6365 enum ice_status ret;
6369 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6370 return ICE_ERR_PARAM;
6372 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6373 return ICE_ERR_PARAM;
6375 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6376 return ICE_ERR_PARAM;
6378 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6379 return ICE_ERR_PARAM;
6380 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6382 /* Add filter if it doesn't exist so then the adding of large
6383 * action always results in update
6386 INIT_LIST_HEAD(&l_head);
6387 fl_info.fltr_info = *f_info;
6388 LIST_ADD(&fl_info.list_entry, &l_head);
6390 entry_exists = false;
6391 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6392 hw->port_info->lport);
6393 if (ret == ICE_ERR_ALREADY_EXISTS)
6394 entry_exists = true;
6398 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6399 rule_lock = &recp_list->filt_rule_lock;
6400 ice_acquire_lock(rule_lock);
6401 /* Get the book keeping entry for the filter */
6402 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6406 /* If counter action was enabled for this rule then don't enable
6407 * sw marker large action
6409 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6410 ret = ICE_ERR_PARAM;
6414 /* if same marker was added before */
6415 if (m_entry->sw_marker_id == sw_marker) {
6416 ret = ICE_ERR_ALREADY_EXISTS;
6420 /* Allocate a hardware table entry to hold large act. Three actions
6421 * for marker based large action
6423 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6427 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6430 /* Update the switch rule to add the marker action */
6431 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6433 ice_release_lock(rule_lock);
6438 ice_release_lock(rule_lock);
6439 /* only remove entry if it did not exist previously */
6441 ret = ice_remove_mac(hw, &l_head);
6447 * ice_add_mac_with_counter - add filter with counter enabled
6448 * @hw: pointer to the hardware structure
6449 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Adds (or reuses) the MAC forwarding rule, allocates a VLAN counter,
 * and attaches a 2-action counter-based large action. Counter and sw
 * marker large actions are mutually exclusive on a rule. On failure,
 * the MAC rule is removed only if this call created it.
6453 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6455 struct ice_fltr_mgmt_list_entry *m_entry;
6456 struct ice_fltr_list_entry fl_info;
6457 struct ice_sw_recipe *recp_list;
6458 struct LIST_HEAD_TYPE l_head;
6459 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6460 enum ice_status ret;
6465 if (f_info->fltr_act != ICE_FWD_TO_VSI)
6466 return ICE_ERR_PARAM;
6468 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6469 return ICE_ERR_PARAM;
6471 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6472 return ICE_ERR_PARAM;
6473 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6474 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6476 entry_exist = false;
6478 rule_lock = &recp_list->filt_rule_lock;
6480 /* Add filter if it doesn't exist so then the adding of large
6481 * action always results in update
6483 INIT_LIST_HEAD(&l_head);
6485 fl_info.fltr_info = *f_info;
6486 LIST_ADD(&fl_info.list_entry, &l_head);
6488 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6489 hw->port_info->lport);
6490 if (ret == ICE_ERR_ALREADY_EXISTS)
6495 ice_acquire_lock(rule_lock);
6496 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6498 ret = ICE_ERR_BAD_PTR;
6502 /* Don't enable counter for a filter for which sw marker was enabled */
6503 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6504 ret = ICE_ERR_PARAM;
6508 /* If a counter was already enabled then don't need to add again */
6509 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6510 ret = ICE_ERR_ALREADY_EXISTS;
6514 /* Allocate a hardware table entry to VLAN counter */
6515 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6519 /* Allocate a hardware table entry to hold large act. Two actions for
6520 * counter based large action
6522 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6526 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6529 /* Update the switch rule to add the counter action */
6530 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6532 ice_release_lock(rule_lock);
6537 ice_release_lock(rule_lock);
6538 /* only remove entry if it did not exist previously */
6540 ret = ice_remove_mac(hw, &l_head);
6545 /* This is mapping table entry that maps every word within a given protocol
6546 * structure to the real byte offset as per the specification of that
6548 * for example dst address is 3 words in ethertype header and corresponding
6549 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
6550 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6551 * matching entry describing its field. This needs to be updated if new
6552 * structure is added to that union.
 /* Per-protocol list of 2-byte word offsets within the header; indexed
  * by enum ice_protocol_type.
  */
6554 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6555 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
6556 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
6557 { ICE_ETYPE_OL, { 0 } },
6558 { ICE_VLAN_OFOS, { 0, 2 } },
6559 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6560 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6561 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6562 26, 28, 30, 32, 34, 36, 38 } },
6563 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6564 26, 28, 30, 32, 34, 36, 38 } },
6565 { ICE_TCP_IL, { 0, 2 } },
6566 { ICE_UDP_OF, { 0, 2 } },
6567 { ICE_UDP_ILOS, { 0, 2 } },
6568 { ICE_SCTP_IL, { 0, 2 } },
 /* tunnel headers start matching at byte 8 (past the UDP header) */
6569 { ICE_VXLAN, { 8, 10, 12, 14 } },
6570 { ICE_GENEVE, { 8, 10, 12, 14 } },
6571 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
6572 { ICE_NVGRE, { 0, 2, 4, 6 } },
6573 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6574 { ICE_PPPOE, { 0, 2, 4, 6 } },
6575 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
6576 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
6577 { ICE_ESP, { 0, 2, 4, 6 } },
6578 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
6579 { ICE_NAT_T, { 8, 10, 12, 14 } },
6580 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
6581 { ICE_VLAN_EX, { 0, 2 } },
6582 { ICE_VLAN_IN, { 0, 2 } },
6585 /* The following table describes preferred grouping of recipes.
6586 * If a recipe that needs to be programmed is a superset or matches one of the
6587 * following combinations, then the recipe needs to be chained as per the
 * Maps each logical protocol type to its hardware protocol ID.
 * Non-const because ice_change_proto_id_to_dvm() patches the
 * ICE_VLAN_OFOS entry when double VLAN mode is active.
6591 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6592 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
6593 { ICE_MAC_IL, ICE_MAC_IL_HW },
6594 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
6595 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
6596 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
6597 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
6598 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
6599 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
6600 { ICE_TCP_IL, ICE_TCP_IL_HW },
6601 { ICE_UDP_OF, ICE_UDP_OF_HW },
6602 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
6603 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
 /* UDP-based tunnels share the outer-UDP HW protocol ID */
6604 { ICE_VXLAN, ICE_UDP_OF_HW },
6605 { ICE_GENEVE, ICE_UDP_OF_HW },
6606 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
6607 { ICE_NVGRE, ICE_GRE_OF_HW },
6608 { ICE_GTP, ICE_UDP_OF_HW },
6609 { ICE_PPPOE, ICE_PPPOE_HW },
6610 { ICE_PFCP, ICE_UDP_ILOS_HW },
6611 { ICE_L2TPV3, ICE_L2TPV3_HW },
6612 { ICE_ESP, ICE_ESP_HW },
6613 { ICE_AH, ICE_AH_HW },
6614 { ICE_NAT_T, ICE_UDP_ILOS_HW },
6615 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
6616 { ICE_VLAN_EX, ICE_VLAN_OF_HW },
6617 { ICE_VLAN_IN, ICE_VLAN_OL_HW },
6621 * ice_find_recp - find a recipe
6622 * @hw: pointer to the hardware structure
6623 * @lkup_exts: extension sequence to match
 * @tun_type: tunnel type the candidate recipe must also match
 * @priority: priority the candidate recipe must also match
6625 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6627 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6628 enum ice_sw_tunnel_type tun_type, u32 priority)
6630 bool refresh_required = true;
6631 struct ice_sw_recipe *recp;
6634 /* Walk through existing recipes to find a match */
6635 recp = hw->switch_info->recp_list;
6636 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6637 /* If recipe was not created for this ID, in SW bookkeeping,
6638 * check if FW has an entry for this recipe. If the FW has an
6639 * entry update it in our SW bookkeeping and continue with the
6642 if (!recp[i].recp_created)
6643 if (ice_get_recp_frm_fw(hw,
6644 hw->switch_info->recp_list, i,
6648 /* Skip inverse action recipes */
6649 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6650 ICE_AQ_RECIPE_ACT_INV_ACT)
6653 /* if number of words we are looking for match */
6654 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6655 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6656 struct ice_fv_word *be = lkup_exts->fv_words;
6657 u16 *cr = recp[i].lkup_exts.field_mask;
6658 u16 *de = lkup_exts->field_mask;
6662 /* ar, cr, and qr are related to the recipe words, while
6663 * be, de, and pe are related to the lookup words
 * Order-independent containment check: every lookup
 * word must appear somewhere in the recipe's words.
6665 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6666 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6668 if (ar[qr].off == be[pe].off &&
6669 ar[qr].prot_id == be[pe].prot_id &&
6671 /* Found the "pe"th word in the
6676 /* After walking through all the words in the
6677 * "i"th recipe if "p"th word was not found then
6678 * this recipe is not what we are looking for.
6679 * So break out from this loop and try the next
6682 if (qr >= recp[i].lkup_exts.n_val_words) {
6687 /* If for "i"th recipe the found was never set to false
6688 * then it means we found our match
6690 if (tun_type == recp[i].tun_type && found &&
6691 priority == recp[i].priority)
6692 return i; /* Return the recipe ID */
6695 return ICE_MAX_NUM_RECIPES;
6699 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6701 * As protocol id for outer vlan is different in dvm and svm, if dvm is
6702 * supported protocol array record for outer vlan has to be modified to
6703 * reflect the value proper for DVM.
 *
 * Idempotent: the inner check skips entries already set to
 * ICE_VLAN_OF_HW.
6705 void ice_change_proto_id_to_dvm(void)
6709 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6710 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6711 ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6712 ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6716 * ice_prot_type_to_id - get protocol ID from protocol type
6717 * @type: protocol type
6718 * @id: pointer to variable that will receive the ID
6720 * Returns true if found, false otherwise
 *
 * Linear lookup of @type in ice_prot_id_tbl; @id is untouched when
 * the type is not found.
6722 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6726 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6727 if (ice_prot_id_tbl[i].type == type) {
6728 *id = ice_prot_id_tbl[i].protocol_id;
6735 * ice_fill_valid_words - count valid words
6736 * @rule: advanced rule with lookup information
6737 * @lkup_exts: byte offset extractions of the words that are valid
6739 * calculate valid words in a lookup rule using mask value
 *
 * Appends one fv_word entry per non-zero 16-bit mask word of @rule to
 * @lkup_exts (offset, HW protocol ID, and mask), bounded by
 * ICE_MAX_CHAIN_WORDS. Returns the number of words added.
6742 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6743 struct ice_prot_lkup_ext *lkup_exts)
6745 u8 j, word, prot_id, ret_val;
6747 if (!ice_prot_type_to_id(rule->type, &prot_id))
6750 word = lkup_exts->n_val_words;
 /* scan the rule's mask one 16-bit word at a time */
6752 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6753 if (((u16 *)&rule->m_u)[j] &&
6754 (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6755 /* No more space to accommodate */
6756 if (word >= ICE_MAX_CHAIN_WORDS)
6758 lkup_exts->fv_words[word].off =
6759 ice_prot_ext[rule->type].offs[j];
6760 lkup_exts->fv_words[word].prot_id =
6761 ice_prot_id_tbl[rule->type].protocol_id;
6762 lkup_exts->field_mask[word] =
6763 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6767 ret_val = word - lkup_exts->n_val_words;
6768 lkup_exts->n_val_words = word;
6774 * ice_create_first_fit_recp_def - Create a recipe grouping
6775 * @hw: pointer to the hardware structure
6776 * @lkup_exts: an array of protocol header extractions
6777 * @rg_list: pointer to a list that stores new recipe groups
6778 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6780 * Using first fit algorithm, take all the words that are still not done
6781 * and start grouping them in 4-word groups. Each group makes up one
 *
 * Returns ICE_ERR_NO_MEMORY if a group entry allocation fails;
 * entries already placed on @rg_list are left for the caller to free.
6784 static enum ice_status
6785 ice_create_first_fit_recp_def(struct ice_hw *hw,
6786 struct ice_prot_lkup_ext *lkup_exts,
6787 struct LIST_HEAD_TYPE *rg_list,
6790 struct ice_pref_recipe_group *grp = NULL;
 /* zero valid words still yields one (empty) group entry */
6795 if (!lkup_exts->n_val_words) {
6796 struct ice_recp_grp_entry *entry;
6798 entry = (struct ice_recp_grp_entry *)
6799 ice_malloc(hw, sizeof(*entry));
6801 return ICE_ERR_NO_MEMORY;
6802 LIST_ADD(&entry->l_entry, rg_list);
6803 grp = &entry->r_group;
6805 grp->n_val_pairs = 0;
6808 /* Walk through every word in the rule to check if it is not done. If so
6809 * then this word needs to be part of a new recipe.
6811 for (j = 0; j < lkup_exts->n_val_words; j++)
6812 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* start a new group when the current one is full */
6814 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6815 struct ice_recp_grp_entry *entry;
6817 entry = (struct ice_recp_grp_entry *)
6818 ice_malloc(hw, sizeof(*entry));
6820 return ICE_ERR_NO_MEMORY;
6821 LIST_ADD(&entry->l_entry, rg_list);
6822 grp = &entry->r_group;
6826 grp->pairs[grp->n_val_pairs].prot_id =
6827 lkup_exts->fv_words[j].prot_id;
6828 grp->pairs[grp->n_val_pairs].off =
6829 lkup_exts->fv_words[j].off;
6830 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6838 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6839 * @hw: pointer to the hardware structure
6840 * @fv_list: field vector with the extraction sequence information
6841 * @rg_list: recipe groupings with protocol-offset pairs
6843 * Helper function to fill in the field vector indices for protocol-offset
6844 * pairs. These indexes are then ultimately programmed into a recipe.
6846 static enum ice_status
6847 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6848 struct LIST_HEAD_TYPE *rg_list)
6850 struct ice_sw_fv_list_entry *fv;
6851 struct ice_recp_grp_entry *rg;
6852 struct ice_fv_word *fv_ext;
/* Nothing to index against if no compatible field vectors were found */
6854 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted; presumably all
 * compatible profiles share the same extraction layout for the matched
 * words — TODO(review) confirm against full source.
 */
6857 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6858 fv_ext = fv->fv_ptr->ew;
6860 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6863 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6864 struct ice_fv_word *pr;
6869 pr = &rg->r_group.pairs[i];
6870 mask = rg->r_group.mask[i];
/* Linear scan of the extraction sequence (es.fvw words) for an
 * entry with matching protocol ID and offset.
 */
6872 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6873 if (fv_ext[j].prot_id == pr->prot_id &&
6874 fv_ext[j].off == pr->off) {
6877 /* Store index of field vector */
6879 rg->fv_mask[i] = mask;
6883 /* Protocol/offset could not be found, caller gave an
6887 return ICE_ERR_PARAM;
6895 * ice_find_free_recp_res_idx - find free result indexes for recipe
6896 * @hw: pointer to hardware structure
6897 * @profiles: bitmap of profiles that will be associated with the new recipe
6898 * @free_idx: pointer to variable to receive the free index bitmap
6900 * The algorithm used here is:
6901 * 1. When creating a new recipe, create a set P which contains all
6902 * Profiles that will be associated with our new recipe
6904 * 2. For each Profile p in set P:
6905 * a. Add all recipes associated with Profile p into set R
6906 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6907 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6908 * i. Or just assume they all have the same possible indexes:
6910 * i.e., PossibleIndexes = 0x0000F00000000000
6912 * 3. For each Recipe r in set R:
6913 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6914 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6916 * FreeIndexes will contain the bits indicating the indexes free for use,
6917 * then the code needs to update the recipe[r].used_result_idx_bits to
6918 * indicate which indexes were selected for use by this recipe.
6921 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6922 ice_bitmap_t *free_idx)
6924 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6925 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6926 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
/* Start from clean bitmaps; possible_idx is then seeded to all-ones and
 * narrowed below by each profile's result-index capability mask.
 */
6929 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6930 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6931 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6932 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6934 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6936 /* For each profile we are going to associate the recipe with, add the
6937 * recipes that are associated with that profile. This will give us
6938 * the set of recipes that our recipe may collide with. Also, determine
6939 * what possible result indexes are usable given this set of profiles.
6941 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6942 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6943 ICE_MAX_NUM_RECIPES);
6944 ice_and_bitmap(possible_idx, possible_idx,
6945 hw->switch_info->prof_res_bm[bit],
6949 /* For each recipe that our new recipe may collide with, determine
6950 * which indexes have been used.
6952 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6953 ice_or_bitmap(used_idx, used_idx,
6954 hw->switch_info->recp_list[bit].res_idxs,
/* FreeIndexes = UsedIndexes ^ PossibleIndexes, per step 3b above */
6957 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6959 /* return number of free indexes */
6960 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6964 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6965 * @hw: pointer to hardware structure
6966 * @rm: recipe management list entry
6967 * @profiles: bitmap of profiles that will be associated.
6969 static enum ice_status
6970 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6971 ice_bitmap_t *profiles)
6973 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6974 struct ice_aqc_recipe_data_elem *tmp;
6975 struct ice_aqc_recipe_data_elem *buf;
6976 struct ice_recp_grp_entry *entry;
6977 enum ice_status status;
6983 /* When more than one recipe are required, another recipe is needed to
6984 * chain them together. Matching a tunnel metadata ID takes up one of
6985 * the match fields in the chaining recipe reducing the number of
6986 * chained recipes by one.
6988 /* check number of free result indices */
6989 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6990 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6992 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6993 free_res_idx, rm->n_grp_count);
/* A chained (multi-group) recipe needs one free result index per
 * sub-recipe and must not exceed the HW chain-length limit.
 */
6995 if (rm->n_grp_count > 1) {
6996 if (rm->n_grp_count > free_res_idx)
6997 return ICE_ERR_MAX_LIMIT;
7002 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7003 return ICE_ERR_MAX_LIMIT;
/* tmp receives the recipe set read back from FW; buf is the new set to
 * be programmed (one element per recipe group).
 */
7005 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7006 ICE_MAX_NUM_RECIPES,
7009 return ICE_ERR_NO_MEMORY;
7011 buf = (struct ice_aqc_recipe_data_elem *)
7012 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7014 status = ICE_ERR_NO_MEMORY;
7018 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7019 recipe_count = ICE_MAX_NUM_RECIPES;
7020 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7022 if (status || recipe_count == 0)
7025 /* Allocate the recipe resources, and configure them according to the
7026 * match fields from protocol headers and extracted field vectors.
7028 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS)
7029 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7032 status = ice_alloc_recipe(hw, &entry->rid);
7036 /* Clear the result index of the located recipe, as this will be
7037 * updated, if needed, later in the recipe creation process.
7039 tmp[0].content.result_indx = 0;
7041 buf[recps] = tmp[0];
7042 buf[recps].recipe_indx = (u8)entry->rid;
7043 /* if the recipe is a non-root recipe RID should be programmed
7044 * as 0 for the rules to be applied correctly.
7046 buf[recps].content.rid = 0;
7047 ice_memset(&buf[recps].content.lkup_indx, 0,
7048 sizeof(buf[recps].content.lkup_indx),
7051 /* All recipes use look-up index 0 to match switch ID. */
7052 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7053 buf[recps].content.mask[0] =
7054 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7055 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7058 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7059 buf[recps].content.lkup_indx[i] = 0x80;
7060 buf[recps].content.mask[i] = 0;
/* Program this group's FV word indices and masks into lookup
 * slots 1..n (slot 0 is reserved for the switch ID above).
 */
7063 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7064 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7065 buf[recps].content.mask[i + 1] =
7066 CPU_TO_LE16(entry->fv_mask[i]);
7069 if (rm->n_grp_count > 1) {
7070 /* Checks to see if there really is a valid result index
7073 if (chain_idx >= ICE_MAX_FV_WORDS) {
7074 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7075 status = ICE_ERR_MAX_LIMIT;
/* Consume one free result index for this sub-recipe's output
 * and advance chain_idx to the next free one.
 */
7079 entry->chain_idx = chain_idx;
7080 buf[recps].content.result_indx =
7081 ICE_AQ_RECIPE_RESULT_EN |
7082 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7083 ICE_AQ_RECIPE_RESULT_DATA_M);
7084 ice_clear_bit(chain_idx, result_idx_bm);
7085 chain_idx = ice_find_first_bit(result_idx_bm,
7089 /* fill recipe dependencies */
7090 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7091 ICE_MAX_NUM_RECIPES);
7092 ice_set_bit(buf[recps].recipe_indx,
7093 (ice_bitmap_t *)buf[recps].recipe_bitmap);
7094 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group recipe: the lone recipe doubles as the root */
7098 if (rm->n_grp_count == 1) {
7099 rm->root_rid = buf[0].recipe_indx;
7100 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7101 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7102 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7103 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7104 sizeof(buf[0].recipe_bitmap),
7105 ICE_NONDMA_TO_NONDMA);
7107 status = ICE_ERR_BAD_PTR;
7110 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7111 * the recipe which is getting created if specified
7112 * by user. Usually any advanced switch filter, which results
7113 * into new extraction sequence, ended up creating a new recipe
7114 * of type ROOT and usually recipes are associated with profiles
7115 * Switch rule referreing newly created recipe, needs to have
7116 * either/or 'fwd' or 'join' priority, otherwise switch rule
7117 * evaluation will not happen correctly. In other words, if
7118 * switch rule to be evaluated on priority basis, then recipe
7119 * needs to have priority, otherwise it will be evaluated last.
7121 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group recipe: allocate one extra "chaining" root recipe whose
 * lookup words are the result indexes produced by the sub-recipes.
 */
7123 struct ice_recp_grp_entry *last_chain_entry;
7126 /* Allocate the last recipe that will chain the outcomes of the
7127 * other recipes together
7129 status = ice_alloc_recipe(hw, &rid);
7133 buf[recps].recipe_indx = (u8)rid;
7134 buf[recps].content.rid = (u8)rid;
7135 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7136 /* the new entry created should also be part of rg_list to
7137 * make sure we have complete recipe
7139 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7140 sizeof(*last_chain_entry));
7141 if (!last_chain_entry) {
7142 status = ICE_ERR_NO_MEMORY;
7145 last_chain_entry->rid = rid;
7146 ice_memset(&buf[recps].content.lkup_indx, 0,
7147 sizeof(buf[recps].content.lkup_indx),
7149 /* All recipes use look-up index 0 to match switch ID. */
7150 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7151 buf[recps].content.mask[0] =
7152 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7153 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7154 buf[recps].content.lkup_indx[i] =
7155 ICE_AQ_RECIPE_LKUP_IGNORE;
7156 buf[recps].content.mask[i] = 0;
7160 /* update r_bitmap with the recp that is used for chaining */
7161 ice_set_bit(rid, rm->r_bitmap);
7162 /* this is the recipe that chains all the other recipes so it
7163 * should not have a chaining ID to indicate the same
7165 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7166 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7168 last_chain_entry->fv_idx[i] = entry->chain_idx;
7169 buf[recps].content.lkup_indx[i] = entry->chain_idx;
7170 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7171 ice_set_bit(entry->rid, rm->r_bitmap);
7173 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7174 if (sizeof(buf[recps].recipe_bitmap) >=
7175 sizeof(rm->r_bitmap)) {
7176 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7177 sizeof(buf[recps].recipe_bitmap),
7178 ICE_NONDMA_TO_NONDMA);
7180 status = ICE_ERR_BAD_PTR;
7183 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7186 rm->root_rid = (u8)rid;
/* Program the assembled recipe set to FW under the change lock */
7188 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7192 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7193 ice_release_change_lock(hw);
7197 /* Every recipe that just got created add it to the recipe
7200 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7201 struct ice_switch_info *sw = hw->switch_info;
7202 bool is_root, idx_found = false;
7203 struct ice_sw_recipe *recp;
7204 u16 idx, buf_idx = 0;
7206 /* find buffer index for copying some data */
7207 for (idx = 0; idx < rm->n_grp_count; idx++)
7208 if (buf[idx].recipe_indx == entry->rid) {
7214 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW book-keeping (recp_list) */
7218 recp = &sw->recp_list[entry->rid];
7219 is_root = (rm->root_rid == entry->rid);
7220 recp->is_root = is_root;
7222 recp->root_rid = entry->rid;
7223 recp->big_recp = (is_root && rm->n_grp_count > 1);
7225 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7226 entry->r_group.n_val_pairs *
7227 sizeof(struct ice_fv_word),
7228 ICE_NONDMA_TO_NONDMA);
7230 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7231 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7233 /* Copy non-result fv index values and masks to recipe. This
7234 * call will also update the result recipe bitmask.
7236 ice_collect_result_idx(&buf[buf_idx], recp);
7238 /* for non-root recipes, also copy to the root, this allows
7239 * easier matching of a complete chained recipe
7242 ice_collect_result_idx(&buf[buf_idx],
7243 &sw->recp_list[rm->root_rid]);
7245 recp->n_ext_words = entry->r_group.n_val_pairs;
7246 recp->chain_idx = entry->chain_idx;
7247 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7248 recp->n_grp_count = rm->n_grp_count;
7249 recp->tun_type = rm->tun_type;
7250 recp->recp_created = true;
7264 * ice_create_recipe_group - creates recipe group
7265 * @hw: pointer to hardware structure
7266 * @rm: recipe management list entry
7267 * @lkup_exts: lookup elements
7269 static enum ice_status
7270 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7271 struct ice_prot_lkup_ext *lkup_exts)
7273 enum ice_status status;
7276 rm->n_grp_count = 0;
7278 /* Create recipes for words that are marked not done by packing them
7281 status = ice_create_first_fit_recp_def(hw, lkup_exts,
7282 &rm->rg_list, &recp_count);
/* Cache the extraction words and masks in the recipe entry alongside
 * the group count returned by the first-fit packing above.
 */
7284 rm->n_grp_count += recp_count;
7285 rm->n_ext_words = lkup_exts->n_val_words;
7286 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7287 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7288 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7289 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7296 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7297 * @hw: pointer to hardware structure
7298 * @lkups: lookup elements or match criteria for the advanced recipe, one
7299 * structure per protocol header
7300 * @lkups_cnt: number of protocols
7301 * @bm: bitmap of field vectors to consider
7302 * @fv_list: pointer to a list that holds the returned field vectors
7304 static enum ice_status
7305 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7306 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7308 enum ice_status status;
/* Translate every requested lookup type to its HW protocol ID; an
 * unmappable type is a configuration error.
 */
7315 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7317 return ICE_ERR_NO_MEMORY;
7319 for (i = 0; i < lkups_cnt; i++)
7320 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7321 status = ICE_ERR_CFG;
7325 /* Find field vectors that include all specified protocol types */
7326 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is only needed for the query above; freed on all paths */
7329 ice_free(hw, prot_ids);
7334 * ice_tun_type_match_word - determine if tun type needs a match mask
7335 * @tun_type: tunnel type
7336 * @mask: mask to be used for the tunnel
7338 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
/* Tunnel/QinQ types distinguished via packet metadata flags need an
 * extra match word; *mask selects which flag bits are compared.
 */
7341 case ICE_SW_TUN_VXLAN_GPE:
7342 case ICE_SW_TUN_GENEVE:
7343 case ICE_SW_TUN_VXLAN:
7344 case ICE_SW_TUN_NVGRE:
7345 case ICE_SW_TUN_UDP:
7346 case ICE_ALL_TUNNELS:
7347 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7348 case ICE_NON_TUN_QINQ:
7349 case ICE_SW_TUN_PPPOE_QINQ:
7350 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7351 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7352 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7353 *mask = ICE_TUN_FLAG_MASK;
/* VLAN-tagged tunnel variants exclude the VLAN bit from the flag mask */
7356 case ICE_SW_TUN_GENEVE_VLAN:
7357 case ICE_SW_TUN_VXLAN_VLAN:
7358 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7368 * ice_add_special_words - Add words that are not protocols, such as metadata
7369 * @rinfo: other information regarding the rule e.g. priority and action info
7370 * @lkup_exts: lookup word structure
7372 static enum ice_status
7373 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7374 struct ice_prot_lkup_ext *lkup_exts)
7378 /* If this is a tunneled packet, then add recipe index to match the
7379 * tunnel bit in the packet metadata flags.
7381 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7382 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7383 u8 word = lkup_exts->n_val_words++;
/* The flag word lives in the HW metadata "protocol" at the
 * tunnel-flag MDID offset; mask was chosen by tunnel type.
 */
7385 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7386 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7387 lkup_exts->field_mask[word] = mask;
/* No room left for the extra metadata match word */
7389 return ICE_ERR_MAX_LIMIT;
7396 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7397 * @hw: pointer to hardware structure
7398 * @rinfo: other information regarding the rule e.g. priority and action info
7399 * @bm: pointer to memory for returning the bitmap of field vectors
7402 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7405 enum ice_prof_type prof_type;
7407 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
/* Each tunnel type maps either to a broad profile class (prof_type,
 * resolved via ice_get_sw_fv_bitmap at the end) or to explicit
 * ICE_PROFID_* bits set directly in bm. NOTE(review): the explicit
 * profile-ID cases appear to bypass the final bitmap query — confirm
 * against the full source (return statements not visible here).
 */
7409 switch (rinfo->tun_type) {
7411 case ICE_NON_TUN_QINQ:
7412 prof_type = ICE_PROF_NON_TUN;
7414 case ICE_ALL_TUNNELS:
7415 prof_type = ICE_PROF_TUN_ALL;
7417 case ICE_SW_TUN_VXLAN_GPE:
7418 case ICE_SW_TUN_GENEVE:
7419 case ICE_SW_TUN_GENEVE_VLAN:
7420 case ICE_SW_TUN_VXLAN:
7421 case ICE_SW_TUN_VXLAN_VLAN:
7422 case ICE_SW_TUN_UDP:
7423 case ICE_SW_TUN_GTP:
7424 prof_type = ICE_PROF_TUN_UDP;
7426 case ICE_SW_TUN_NVGRE:
7427 prof_type = ICE_PROF_TUN_GRE;
7429 case ICE_SW_TUN_PPPOE:
7430 case ICE_SW_TUN_PPPOE_QINQ:
7431 prof_type = ICE_PROF_TUN_PPPOE;
7433 case ICE_SW_TUN_PPPOE_PAY:
7434 case ICE_SW_TUN_PPPOE_PAY_QINQ:
7435 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7437 case ICE_SW_TUN_PPPOE_IPV4:
7438 case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7439 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7440 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7441 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7443 case ICE_SW_TUN_PPPOE_IPV4_TCP:
7444 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7446 case ICE_SW_TUN_PPPOE_IPV4_UDP:
7447 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7449 case ICE_SW_TUN_PPPOE_IPV6:
7450 case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7451 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7452 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7453 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7455 case ICE_SW_TUN_PPPOE_IPV6_TCP:
7456 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7458 case ICE_SW_TUN_PPPOE_IPV6_UDP:
7459 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7461 case ICE_SW_TUN_PROFID_IPV6_ESP:
7462 case ICE_SW_TUN_IPV6_ESP:
7463 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7465 case ICE_SW_TUN_PROFID_IPV6_AH:
7466 case ICE_SW_TUN_IPV6_AH:
7467 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7469 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7470 case ICE_SW_TUN_IPV6_L2TPV3:
7471 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7473 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7474 case ICE_SW_TUN_IPV6_NAT_T:
7475 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7477 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7478 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7480 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7481 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7483 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7484 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7486 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7487 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7489 case ICE_SW_TUN_IPV4_NAT_T:
7490 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7492 case ICE_SW_TUN_IPV4_L2TPV3:
7493 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7495 case ICE_SW_TUN_IPV4_ESP:
7496 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7498 case ICE_SW_TUN_IPV4_AH:
7499 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7501 case ICE_SW_IPV4_TCP:
7502 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7504 case ICE_SW_IPV4_UDP:
7505 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7507 case ICE_SW_IPV6_TCP:
7508 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7510 case ICE_SW_IPV6_UDP:
7511 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7513 case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7514 ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7516 case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7517 ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7519 case ICE_SW_TUN_IPV4_GTPU_IPV4:
7520 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7521 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7522 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7524 case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7525 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7527 case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7528 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7530 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7531 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7532 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7533 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7535 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7536 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7538 case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7539 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7541 case ICE_SW_TUN_IPV6_GTPU_IPV4:
7542 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7543 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7544 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7546 case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7547 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7549 case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7550 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7552 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7553 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7554 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7555 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7557 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7558 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7560 case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7561 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7563 case ICE_SW_TUN_IPV4_GTPU_IPV6:
7564 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7565 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7566 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7568 case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7569 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7571 case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7572 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7574 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7575 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7576 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7577 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7579 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7580 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7582 case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7583 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7585 case ICE_SW_TUN_IPV6_GTPU_IPV6:
7586 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7587 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7588 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7590 case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7591 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7593 case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7594 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7596 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7597 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7598 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7599 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7601 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7602 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7604 case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7605 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
/* Mixed and default cases fall back to the broadest profile class */
7607 case ICE_SW_TUN_AND_NON_TUN:
7608 case ICE_SW_TUN_AND_NON_TUN_QINQ:
7610 prof_type = ICE_PROF_ALL;
/* Resolve the chosen profile class into the profile bitmap */
7614 ice_get_sw_fv_bitmap(hw, prof_type, bm);
7618 * ice_is_prof_rule - determine if rule type is a profile rule
7619 * @type: the rule type
7621 * if the rule type is a profile rule, that means that there no field value
7622 * match required, in this case just a profile hit is required.
7624 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
/* Only the ICE_SW_TUN_PROFID_* tunnel types are profile-hit-only */
7627 case ICE_SW_TUN_PROFID_IPV6_ESP:
7628 case ICE_SW_TUN_PROFID_IPV6_AH:
7629 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7630 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7631 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7632 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7633 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7634 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7644 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7645 * @hw: pointer to hardware structure
7646 * @lkups: lookup elements or match criteria for the advanced recipe, one
7647 * structure per protocol header
7648 * @lkups_cnt: number of protocols
7649 * @rinfo: other information regarding the rule e.g. priority and action info
7650 * @rid: return the recipe ID of the recipe created
7652 static enum ice_status
7653 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7654 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7656 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7657 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7658 struct ice_prot_lkup_ext *lkup_exts;
7659 struct ice_recp_grp_entry *r_entry;
7660 struct ice_sw_fv_list_entry *fvit;
7661 struct ice_recp_grp_entry *r_tmp;
7662 struct ice_sw_fv_list_entry *tmp;
7663 enum ice_status status = ICE_SUCCESS;
7664 struct ice_sw_recipe *rm;
/* Profile rules match on a profile hit alone and may carry no lookup
 * elements; every other rule type must supply at least one.
 */
7667 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7668 return ICE_ERR_PARAM;
7670 lkup_exts = (struct ice_prot_lkup_ext *)
7671 ice_malloc(hw, sizeof(*lkup_exts));
7673 return ICE_ERR_NO_MEMORY;
7675 /* Determine the number of words to be matched and if it exceeds a
7676 * recipe's restrictions
7678 for (i = 0; i < lkups_cnt; i++) {
7681 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7682 status = ICE_ERR_CFG;
7683 goto err_free_lkup_exts;
7686 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7688 status = ICE_ERR_CFG;
7689 goto err_free_lkup_exts;
7693 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7695 status = ICE_ERR_NO_MEMORY;
7696 goto err_free_lkup_exts;
7699 /* Get field vectors that contain fields extracted from all the protocol
7700 * headers being programmed.
7702 INIT_LIST_HEAD(&rm->fv_list);
7703 INIT_LIST_HEAD(&rm->rg_list);
7705 /* Get bitmap of field vectors (profiles) that are compatible with the
7706 * rule request; only these will be searched in the subsequent call to
7709 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7711 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7715 /* Create any special protocol/offset pairs, such as looking at tunnel
7716 * bits by extracting metadata
7718 status = ice_add_special_words(rinfo, lkup_exts);
7720 goto err_free_lkup_exts;
7722 /* Group match words into recipes using preferred recipe grouping
7725 status = ice_create_recipe_group(hw, rm, lkup_exts);
7729 /* set the recipe priority if specified */
7730 rm->priority = (u8)rinfo->priority;
7732 /* Find offsets from the field vector. Pick the first one for all the
7735 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7739 /* An empty FV list means to use all the profiles returned in the
7742 if (LIST_EMPTY(&rm->fv_list)) {
7745 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7746 struct ice_sw_fv_list_entry *fvl;
7748 fvl = (struct ice_sw_fv_list_entry *)
7749 ice_malloc(hw, sizeof(*fvl));
7753 fvl->profile_id = j;
7754 LIST_ADD(&fvl->list_entry, &rm->fv_list);
7758 /* get bitmap of all profiles the recipe will be associated with */
7759 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7760 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7762 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7763 ice_set_bit((u16)fvit->profile_id, profiles);
7766 /* Look for a recipe which matches our requested fv / mask list */
7767 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7768 if (*rid < ICE_MAX_NUM_RECIPES)
7769 /* Success if found a recipe that match the existing criteria */
7772 rm->tun_type = rinfo->tun_type;
7773 /* Recipe we need does not exist, add a recipe */
7774 status = ice_add_sw_recipe(hw, rm, profiles);
7778 /* Associate all the recipes created with all the profiles in the
7779 * common field vector.
7781 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7783 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* read-modify-write of the FW's recipe-to-profile association:
 * fetch the current bitmap, OR in our new recipes, write it back
 * under the change lock.
 */
7786 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7787 (u8 *)r_bitmap, NULL);
7791 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7792 ICE_MAX_NUM_RECIPES);
7793 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7797 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7800 ice_release_change_lock(hw);
7805 /* Update profile to recipe bitmap array */
7806 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7807 ICE_MAX_NUM_RECIPES);
7809 /* Update recipe to profile bitmap array */
7810 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7811 ice_set_bit((u16)fvit->profile_id,
7812 recipe_to_profile[j]);
7815 *rid = rm->root_rid;
7816 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7817 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release recipe-group entries and the field-vector list */
7819 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7820 ice_recp_grp_entry, l_entry) {
7821 LIST_DEL(&r_entry->l_entry);
7822 ice_free(hw, r_entry);
7825 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7827 LIST_DEL(&fvit->list_entry);
7832 ice_free(hw, rm->root_buf);
7837 ice_free(hw, lkup_exts);
7843 * ice_find_dummy_packet - find dummy packet by tunnel type
7845 * @lkups: lookup elements or match criteria for the advanced recipe, one
7846 * structure per protocol header
7847 * @lkups_cnt: number of protocols
7848 * @tun_type: tunnel type from the match criteria
7849 * @pkt: dummy packet to fill according to filter match criteria
7850 * @pkt_len: packet length of dummy packet
7851 * @offsets: pointer to receive the pointer to the offsets for the packet
7854 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7855 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7857 const struct ice_dummy_pkt_offsets **offsets)
7859 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7863 for (i = 0; i < lkups_cnt; i++) {
7864 if (lkups[i].type == ICE_UDP_ILOS)
7866 else if (lkups[i].type == ICE_TCP_IL)
7868 else if (lkups[i].type == ICE_IPV6_OFOS)
7870 else if (lkups[i].type == ICE_VLAN_OFOS)
7872 else if (lkups[i].type == ICE_IPV4_OFOS &&
7873 lkups[i].h_u.ipv4_hdr.protocol ==
7874 ICE_IPV4_NVGRE_PROTO_ID &&
7875 lkups[i].m_u.ipv4_hdr.protocol ==
7878 else if (lkups[i].type == ICE_PPPOE &&
7879 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7880 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7881 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7884 else if (lkups[i].type == ICE_ETYPE_OL &&
7885 lkups[i].h_u.ethertype.ethtype_id ==
7886 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7887 lkups[i].m_u.ethertype.ethtype_id ==
7890 else if (lkups[i].type == ICE_IPV4_IL &&
7891 lkups[i].h_u.ipv4_hdr.protocol ==
7893 lkups[i].m_u.ipv4_hdr.protocol ==
7898 if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7899 tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7900 *pkt = dummy_qinq_ipv6_pkt;
7901 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7902 *offsets = dummy_qinq_ipv6_packet_offsets;
7904 } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7905 tun_type == ICE_NON_TUN_QINQ) {
7906 *pkt = dummy_qinq_ipv4_pkt;
7907 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7908 *offsets = dummy_qinq_ipv4_packet_offsets;
7912 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7913 *pkt = dummy_qinq_pppoe_ipv6_packet;
7914 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7915 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7917 } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7918 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7919 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7920 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7922 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7923 *pkt = dummy_qinq_pppoe_ipv6_packet;
7924 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7925 *offsets = dummy_qinq_pppoe_packet_offsets;
7927 } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7928 tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7929 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7930 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7931 *offsets = dummy_qinq_pppoe_packet_offsets;
7935 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7936 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7937 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7938 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7940 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7941 *pkt = dummy_ipv6_gtp_packet;
7942 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7943 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7947 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7948 *pkt = dummy_ipv4_esp_pkt;
7949 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7950 *offsets = dummy_ipv4_esp_packet_offsets;
7954 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7955 *pkt = dummy_ipv6_esp_pkt;
7956 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7957 *offsets = dummy_ipv6_esp_packet_offsets;
7961 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7962 *pkt = dummy_ipv4_ah_pkt;
7963 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7964 *offsets = dummy_ipv4_ah_packet_offsets;
7968 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7969 *pkt = dummy_ipv6_ah_pkt;
7970 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7971 *offsets = dummy_ipv6_ah_packet_offsets;
7975 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7976 *pkt = dummy_ipv4_nat_pkt;
7977 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7978 *offsets = dummy_ipv4_nat_packet_offsets;
7982 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7983 *pkt = dummy_ipv6_nat_pkt;
7984 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7985 *offsets = dummy_ipv6_nat_packet_offsets;
7989 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7990 *pkt = dummy_ipv4_l2tpv3_pkt;
7991 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7992 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7996 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7997 *pkt = dummy_ipv6_l2tpv3_pkt;
7998 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7999 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8003 if (tun_type == ICE_SW_TUN_GTP) {
8004 *pkt = dummy_udp_gtp_packet;
8005 *pkt_len = sizeof(dummy_udp_gtp_packet);
8006 *offsets = dummy_udp_gtp_packet_offsets;
8010 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8011 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8012 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8013 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8014 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8018 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8019 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8020 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8021 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8022 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8026 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8027 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8028 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8029 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8030 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8034 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8035 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8036 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8037 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8038 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8042 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8043 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8044 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8045 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8046 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8050 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8051 tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8052 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8053 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8054 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8058 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8059 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8060 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8061 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8062 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8066 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8067 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8068 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8069 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8070 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8074 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8075 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8076 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8077 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8078 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8082 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8083 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8084 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8085 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8086 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8090 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8091 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8092 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8093 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8094 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8098 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8099 tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8100 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8101 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8102 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8106 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8107 *pkt = dummy_pppoe_ipv6_packet;
8108 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8109 *offsets = dummy_pppoe_packet_offsets;
8111 } else if (tun_type == ICE_SW_TUN_PPPOE ||
8112 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8113 *pkt = dummy_pppoe_ipv4_packet;
8114 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8115 *offsets = dummy_pppoe_packet_offsets;
8119 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8120 *pkt = dummy_pppoe_ipv4_packet;
8121 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8122 *offsets = dummy_pppoe_packet_ipv4_offsets;
8126 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8127 *pkt = dummy_pppoe_ipv4_tcp_packet;
8128 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8129 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8133 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8134 *pkt = dummy_pppoe_ipv4_udp_packet;
8135 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8136 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8140 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8141 *pkt = dummy_pppoe_ipv6_packet;
8142 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8143 *offsets = dummy_pppoe_packet_ipv6_offsets;
8147 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8148 *pkt = dummy_pppoe_ipv6_tcp_packet;
8149 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8150 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8154 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8155 *pkt = dummy_pppoe_ipv6_udp_packet;
8156 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8157 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8161 if (tun_type == ICE_SW_IPV4_TCP) {
8162 *pkt = dummy_tcp_packet;
8163 *pkt_len = sizeof(dummy_tcp_packet);
8164 *offsets = dummy_tcp_packet_offsets;
8168 if (tun_type == ICE_SW_IPV4_UDP) {
8169 *pkt = dummy_udp_packet;
8170 *pkt_len = sizeof(dummy_udp_packet);
8171 *offsets = dummy_udp_packet_offsets;
8175 if (tun_type == ICE_SW_IPV6_TCP) {
8176 *pkt = dummy_tcp_ipv6_packet;
8177 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8178 *offsets = dummy_tcp_ipv6_packet_offsets;
8182 if (tun_type == ICE_SW_IPV6_UDP) {
8183 *pkt = dummy_udp_ipv6_packet;
8184 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8185 *offsets = dummy_udp_ipv6_packet_offsets;
8189 /* Support GTP tunnel + L3 */
8190 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8191 tun_type == ICE_SW_TUN_GTP_IPV4) {
8192 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8193 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8194 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8197 if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8198 tun_type == ICE_SW_TUN_GTP_IPV6) {
8199 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8200 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8201 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8204 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
8205 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8206 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8207 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8210 if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
8211 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8212 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8213 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8217 if (tun_type == ICE_ALL_TUNNELS) {
8218 *pkt = dummy_gre_udp_packet;
8219 *pkt_len = sizeof(dummy_gre_udp_packet);
8220 *offsets = dummy_gre_udp_packet_offsets;
8224 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8226 *pkt = dummy_gre_tcp_packet;
8227 *pkt_len = sizeof(dummy_gre_tcp_packet);
8228 *offsets = dummy_gre_tcp_packet_offsets;
8232 *pkt = dummy_gre_udp_packet;
8233 *pkt_len = sizeof(dummy_gre_udp_packet);
8234 *offsets = dummy_gre_udp_packet_offsets;
8238 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8239 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8240 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8241 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8243 *pkt = dummy_udp_tun_tcp_packet;
8244 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8245 *offsets = dummy_udp_tun_tcp_packet_offsets;
8249 *pkt = dummy_udp_tun_udp_packet;
8250 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8251 *offsets = dummy_udp_tun_udp_packet_offsets;
8257 *pkt = dummy_vlan_udp_packet;
8258 *pkt_len = sizeof(dummy_vlan_udp_packet);
8259 *offsets = dummy_vlan_udp_packet_offsets;
8262 *pkt = dummy_udp_packet;
8263 *pkt_len = sizeof(dummy_udp_packet);
8264 *offsets = dummy_udp_packet_offsets;
8266 } else if (udp && ipv6) {
8268 *pkt = dummy_vlan_udp_ipv6_packet;
8269 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8270 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8273 *pkt = dummy_udp_ipv6_packet;
8274 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8275 *offsets = dummy_udp_ipv6_packet_offsets;
8277 } else if ((tcp && ipv6) || ipv6) {
8279 *pkt = dummy_vlan_tcp_ipv6_packet;
8280 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8281 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8284 *pkt = dummy_tcp_ipv6_packet;
8285 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8286 *offsets = dummy_tcp_ipv6_packet_offsets;
8291 *pkt = dummy_vlan_tcp_packet;
8292 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8293 *offsets = dummy_vlan_tcp_packet_offsets;
8295 *pkt = dummy_tcp_packet;
8296 *pkt_len = sizeof(dummy_tcp_packet);
8297 *offsets = dummy_tcp_packet_offsets;
8302 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8304 * @lkups: lookup elements or match criteria for the advanced recipe, one
8305 * structure per protocol header
8306 * @lkups_cnt: number of protocols
8307 * @s_rule: stores rule information from the match criteria
8308 * @dummy_pkt: dummy packet to fill according to filter match criteria
8309 * @pkt_len: packet length of dummy packet
8310 * @offsets: offset info for the dummy packet
8312 static enum ice_status
8313 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8314 struct ice_aqc_sw_rules_elem *s_rule,
8315 const u8 *dummy_pkt, u16 pkt_len,
8316 const struct ice_dummy_pkt_offsets *offsets)
8321 /* Start with a packet with a pre-defined/dummy content. Then, fill
8322 * in the header values to be looked up or matched.
8324 pkt = s_rule->pdata.lkup_tx_rx.hdr;
8326 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8328 for (i = 0; i < lkups_cnt; i++) {
8329 enum ice_protocol_type type;
8330 u16 offset = 0, len = 0, j;
8333 /* find the start of this layer; it should be found since this
8334 * was already checked when search for the dummy packet
8336 type = lkups[i].type;
8337 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8338 if (type == offsets[j].type) {
8339 offset = offsets[j].offset;
8344 /* this should never happen in a correct calling sequence */
8346 return ICE_ERR_PARAM;
8348 switch (lkups[i].type) {
8351 len = sizeof(struct ice_ether_hdr);
8354 len = sizeof(struct ice_ethtype_hdr);
8359 len = sizeof(struct ice_vlan_hdr);
8363 len = sizeof(struct ice_ipv4_hdr);
8367 len = sizeof(struct ice_ipv6_hdr);
8372 len = sizeof(struct ice_l4_hdr);
8375 len = sizeof(struct ice_sctp_hdr);
8378 len = sizeof(struct ice_nvgre);
8383 len = sizeof(struct ice_udp_tnl_hdr);
8387 case ICE_GTP_NO_PAY:
8388 len = sizeof(struct ice_udp_gtp_hdr);
8391 len = sizeof(struct ice_pppoe_hdr);
8394 len = sizeof(struct ice_esp_hdr);
8397 len = sizeof(struct ice_nat_t_hdr);
8400 len = sizeof(struct ice_ah_hdr);
8403 len = sizeof(struct ice_l2tpv3_sess_hdr);
8406 return ICE_ERR_PARAM;
8409 /* the length should be a word multiple */
8410 if (len % ICE_BYTES_PER_WORD)
8413 /* We have the offset to the header start, the length, the
8414 * caller's header values and mask. Use this information to
8415 * copy the data into the dummy packet appropriately based on
8416 * the mask. Note that we need to only write the bits as
8417 * indicated by the mask to make sure we don't improperly write
8418 * over any significant packet data.
8420 for (j = 0; j < len / sizeof(u16); j++)
8421 if (((u16 *)&lkups[i].m_u)[j])
8422 ((u16 *)(pkt + offset))[j] =
8423 (((u16 *)(pkt + offset))[j] &
8424 ~((u16 *)&lkups[i].m_u)[j]) |
8425 (((u16 *)&lkups[i].h_u)[j] &
8426 ((u16 *)&lkups[i].m_u)[j]);
8429 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8435 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8436 * @hw: pointer to the hardware structure
8437 * @tun_type: tunnel type
8438 * @pkt: dummy packet to fill in
8439 * @offsets: offset info for the dummy packet
/* Looks up the currently open VXLAN/GENEVE tunnel UDP port and patches it
 * into the outer UDP header (ICE_UDP_OF) of the already-built dummy packet.
 * NOTE(review): lines appear elided in this extraction; code is preserved
 * byte-for-byte, comments only are added.
 */
8441 static enum ice_status
8442 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8443 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* VXLAN-family tunnel types share the TNL_VXLAN open port. */
8448 case ICE_SW_TUN_AND_NON_TUN:
8449 case ICE_SW_TUN_VXLAN_GPE:
8450 case ICE_SW_TUN_VXLAN:
8451 case ICE_SW_TUN_VXLAN_VLAN:
8452 case ICE_SW_TUN_UDP:
8453 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8457 case ICE_SW_TUN_GENEVE:
8458 case ICE_SW_TUN_GENEVE_VLAN:
8459 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8464 /* Nothing needs to be done for this tunnel type */
8468 /* Find the outer UDP protocol header and insert the port number */
8469 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8470 if (offsets[i].type == ICE_UDP_OF) {
8471 struct ice_l4_hdr *hdr;
8474 offset = offsets[i].offset;
8475 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* tunnel port goes in network byte order */
8476 hdr->dst_port = CPU_TO_BE16(open_port);
8486 * ice_find_adv_rule_entry - Search a rule entry
8487 * @hw: pointer to the hardware structure
8488 * @lkups: lookup elements or match criteria for the advanced recipe, one
8489 * structure per protocol header
8490 * @lkups_cnt: number of protocols
8491 * @recp_id: recipe ID for which we are finding the rule
8492 * @rinfo: other information regarding the rule e.g. priority and action info
8494 * Helper function to search for a given advanced rule entry
8495 * Returns pointer to entry storing the rule if found
/* NOTE(review): body lines are elided in this extraction; comments only
 * are added below.
 */
8497 static struct ice_adv_fltr_mgmt_list_entry *
8498 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8499 u16 lkups_cnt, u16 recp_id,
8500 struct ice_adv_rule_info *rinfo)
8502 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8503 struct ice_switch_info *sw = hw->switch_info;
/* Walk the book-keeping list of filter rules for this recipe. */
8506 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8507 ice_adv_fltr_mgmt_list_entry, list_entry) {
8508 bool lkups_matched = true;
/* candidate must have the same number of lookup words */
8510 if (lkups_cnt != list_itr->lkups_cnt)
/* compare every lookup element bytewise */
8512 for (i = 0; i < list_itr->lkups_cnt; i++)
8513 if (memcmp(&list_itr->lkups[i], &lkups[i],
8515 lkups_matched = false;
/* a match also requires identical action flag and tunnel type */
8518 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8519 rinfo->tun_type == list_itr->rule_info.tun_type &&
8527 * ice_adv_add_update_vsi_list
8528 * @hw: pointer to the hardware structure
8529 * @m_entry: pointer to current adv filter management list entry
8530 * @cur_fltr: filter information from the book keeping entry
8531 * @new_fltr: filter information with the new VSI to be added
8533 * Call AQ command to add or update previously created VSI list with new VSI.
8535 * Helper function to do book keeping associated with adding filter information
8536 * The algorithm to do the booking keeping is described below :
8537 * When a VSI needs to subscribe to a given advanced filter
8538 * if only one VSI has been added till now
8539 * Allocate a new VSI list and add two VSIs
8540 * to this list using switch rule command
8541 * Update the previously created switch rule with the
8542 * newly created VSI list ID
8543 * if a VSI list was previously created
8544 * Add the new VSI to the previously created VSI list set
8545 * using the update switch rule command
/* NOTE(review): intermediate lines are elided in this extraction; code is
 * preserved byte-for-byte, comments only are added.
 */
8547 static enum ice_status
8548 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8549 struct ice_adv_fltr_mgmt_list_entry *m_entry,
8550 struct ice_adv_rule_info *cur_fltr,
8551 struct ice_adv_rule_info *new_fltr)
8553 enum ice_status status;
8554 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be merged into a VSI list. */
8556 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8557 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8558 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8559 return ICE_ERR_NOT_IMPL;
/* Mixing a queue-forward new filter with a VSI-forward existing one is
 * also unsupported.
 */
8561 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8562 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8563 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8564 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8565 return ICE_ERR_NOT_IMPL;
8567 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8568 /* Only one entry existed in the mapping and it was not already
8569 * a part of a VSI list. So, create a VSI list with the old and
8572 struct ice_fltr_info tmp_fltr;
8573 u16 vsi_handle_arr[2];
8575 /* A rule already exists with the new VSI being added */
8576 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8577 new_fltr->sw_act.fwd_id.hw_vsi_id)
8578 return ICE_ERR_ALREADY_EXISTS;
/* create a two-entry VSI list from the old and new VSIs */
8580 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8581 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8582 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8588 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8589 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8590 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8591 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8592 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8593 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8595 /* Update the previous switch rule of "forward to VSI" to
8598 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* reflect the new list in the book-keeping entry */
8602 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8603 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8604 m_entry->vsi_list_info =
8605 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8608 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8610 if (!m_entry->vsi_list_info)
8613 /* A rule already exists with the new VSI being added */
8614 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8617 /* Update the previously created VSI list set with
8618 * the new VSI ID passed in
8620 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8622 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8624 ice_aqc_opc_update_sw_rules,
8626 /* update VSI list mapping info with new VSI ID */
8628 ice_set_bit(vsi_handle,
8629 m_entry->vsi_list_info->vsi_map);
/* one more VSI now subscribes to this filter */
8632 m_entry->vsi_count++;
8637 * ice_add_adv_rule - helper function to create an advanced switch rule
8638 * @hw: pointer to the hardware structure
8639 * @lkups: information on the words that needs to be looked up. All words
8640 * together makes one recipe
8641 * @lkups_cnt: num of entries in the lkups array
8642 * @rinfo: other information related to the rule that needs to be programmed
8643 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8644 * ignored in case of error.
8646 * This function can program only 1 rule at a time. The lkups is used to
8647 * describe all the words that form the "lookup" portion of the recipe.
8648 * These words can span multiple protocols. Callers to this function need to
8649 * pass in a list of protocol headers with lookup information along and mask
8650 * that determines which words are valid from the given protocol header.
8651 * rinfo describes other information related to this rule such as forwarding
8652 * IDs, priority of this rule, etc.
/* NOTE(review): many intermediate lines are elided in this extraction;
 * code below is byte-preserved, comments only are added.
 */
8655 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8656 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8657 struct ice_rule_query_data *added_entry)
8659 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8660 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8661 const struct ice_dummy_pkt_offsets *pkt_offsets;
8662 struct ice_aqc_sw_rules_elem *s_rule = NULL;
8663 struct LIST_HEAD_TYPE *rule_head;
8664 struct ice_switch_info *sw;
8665 enum ice_status status;
8666 const u8 *pkt = NULL;
8672 /* Initialize profile to result index bitmap */
8673 if (!hw->switch_info->prof_res_bm_init) {
8674 hw->switch_info->prof_res_bm_init = 1;
8675 ice_init_prof_result_bm(hw);
/* profile rules are allowed to carry zero lookups; others are not */
8678 prof_rule = ice_is_prof_rule(rinfo->tun_type);
8679 if (!prof_rule && !lkups_cnt)
8680 return ICE_ERR_PARAM;
8682 /* get # of words we need to match */
8684 for (i = 0; i < lkups_cnt; i++) {
8687 ptr = (u16 *)&lkups[i].m_u;
/* count non-zero 16-bit words across each lookup's mask */
8688 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8694 if (word_cnt > ICE_MAX_CHAIN_WORDS)
8695 return ICE_ERR_PARAM;
8697 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8698 return ICE_ERR_PARAM;
8701 /* make sure that we can locate a dummy packet */
8702 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8705 status = ICE_ERR_PARAM;
8706 goto err_ice_add_adv_rule;
/* only these four forwarding actions are supported here */
8709 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8710 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8711 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8712 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8715 vsi_handle = rinfo->sw_act.vsi_handle;
8716 if (!ice_is_vsi_valid(hw, vsi_handle))
8717 return ICE_ERR_PARAM;
8719 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8720 rinfo->sw_act.fwd_id.hw_vsi_id =
8721 ice_get_hw_vsi_num(hw, vsi_handle);
/* for TX rules the source is the HW VSI number */
8722 if (rinfo->sw_act.flag & ICE_FLTR_TX)
8723 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8725 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8728 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8730 /* we have to add VSI to VSI_LIST and increment vsi_count.
8731 * Also Update VSI list so that we can change forwarding rule
8732 * if the rule already exists, we will check if it exists with
8733 * same vsi_id, if not then add it to the VSI list if it already
8734 * exists if not then create a VSI list and add the existing VSI
8735 * ID and the new VSI ID to the list
8736 * We will add that VSI to the list
8738 status = ice_adv_add_update_vsi_list(hw, m_entry,
8739 &m_entry->rule_info,
/* rule already existed: report the existing IDs to the caller */
8742 added_entry->rid = rid;
8743 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8744 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* allocate AQ buffer sized for the fixed header plus dummy packet */
8748 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8749 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8751 return ICE_ERR_NO_MEMORY;
8752 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the forwarding action bits for the switch rule */
8753 switch (rinfo->sw_act.fltr_act) {
8754 case ICE_FWD_TO_VSI:
8755 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8756 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8757 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8760 act |= ICE_SINGLE_ACT_TO_Q;
8761 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8762 ICE_SINGLE_ACT_Q_INDEX_M;
8764 case ICE_FWD_TO_QGRP:
/* queue region size is encoded as log2 of the group size */
8765 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8766 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8767 act |= ICE_SINGLE_ACT_TO_Q;
8768 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8769 ICE_SINGLE_ACT_Q_INDEX_M;
8770 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8771 ICE_SINGLE_ACT_Q_REGION_M;
8773 case ICE_DROP_PACKET:
8774 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8775 ICE_SINGLE_ACT_VALID_BIT;
8778 status = ICE_ERR_CFG;
8779 goto err_ice_add_adv_rule;
8782 /* set the rule LOOKUP type based on caller specified 'RX'
8783 * instead of hardcoding it to be either LOOKUP_TX/RX
8785 * for 'RX' set the source to be the port number
8786 * for 'TX' set the source to be the source HW VSI number (determined
8790 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8791 s_rule->pdata.lkup_tx_rx.src =
8792 CPU_TO_LE16(hw->port_info->lport);
8794 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8795 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8798 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8799 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* build the training packet from the dummy + caller's match values */
8801 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8802 pkt_len, pkt_offsets);
8804 goto err_ice_add_adv_rule;
/* patch in the open tunnel UDP port for tunneled rule types */
8806 if (rinfo->tun_type != ICE_NON_TUN &&
8807 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8808 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8809 s_rule->pdata.lkup_tx_rx.hdr,
8812 goto err_ice_add_adv_rule;
/* program the rule into hardware via admin queue */
8815 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8816 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8819 goto err_ice_add_adv_rule;
8820 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8821 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry))
8823 status = ICE_ERR_NO_MEMORY;
8824 goto err_ice_add_adv_rule;
/* keep a private copy of the lookups for later matching/removal */
8827 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8828 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8829 ICE_NONDMA_TO_NONDMA);
8830 if (!adv_fltr->lkups && !prof_rule) {
8831 status = ICE_ERR_NO_MEMORY;
8832 goto err_ice_add_adv_rule;
8835 adv_fltr->lkups_cnt = lkups_cnt;
8836 adv_fltr->rule_info = *rinfo;
8837 adv_fltr->rule_info.fltr_rule_id =
8838 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8839 sw = hw->switch_info;
8840 sw->recp_list[rid].adv_rule = true;
8841 rule_head = &sw->recp_list[rid].filt_rules;
8843 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8844 adv_fltr->vsi_count = 1;
8846 /* Add rule entry to book keeping list */
8847 LIST_ADD(&adv_fltr->list_entry, rule_head);
8849 added_entry->rid = rid;
8850 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8851 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* unified cleanup: free partially built state on failure, always free
 * the temporary AQ buffer
 */
8853 err_ice_add_adv_rule:
8854 if (status && adv_fltr) {
8855 ice_free(hw, adv_fltr->lkups);
8856 ice_free(hw, adv_fltr);
8859 ice_free(hw, s_rule);
8865 * ice_adv_rem_update_vsi_list
8866 * @hw: pointer to the hardware structure
8867 * @vsi_handle: VSI handle of the VSI to remove
8868 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes @vsi_handle from the rule's VSI list; when only one subscriber
 * remains, converts the rule back to plain forward-to-VSI and deletes the
 * now-unneeded VSI list.
 * NOTE(review): intermediate lines are elided in this extraction; comments
 * only are added.
 */
8871 static enum ice_status
8872 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8873 struct ice_adv_fltr_mgmt_list_entry *fm_list)
8875 struct ice_vsi_list_map_info *vsi_list_info;
8876 enum ice_sw_lkup_type lkup_type;
8877 enum ice_status status;
8880 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8881 fm_list->vsi_count == 0)
8882 return ICE_ERR_PARAM;
8884 /* A rule with the VSI being removed does not exist */
8885 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8886 return ICE_ERR_DOES_NOT_EXIST;
8888 lkup_type = ICE_SW_LKUP_LAST;
8889 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' here requests removal of the VSI from the HW VSI list */
8890 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8891 ice_aqc_opc_update_sw_rules,
8896 fm_list->vsi_count--;
8897 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8898 vsi_list_info = fm_list->vsi_list_info;
8899 if (fm_list->vsi_count == 1) {
8900 struct ice_fltr_info tmp_fltr;
/* find the sole remaining subscriber VSI */
8903 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8905 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8906 return ICE_ERR_OUT_OF_RANGE;
8908 /* Make sure VSI list is empty before removing it below */
8909 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8911 ice_aqc_opc_update_sw_rules,
8916 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8917 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8918 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8919 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8920 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8921 tmp_fltr.fwd_id.hw_vsi_id =
8922 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8923 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8924 ice_get_hw_vsi_num(hw, rem_vsi_handle);
8925 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8927 /* Update the previous switch rule of "MAC forward to VSI" to
8928 * "MAC fwd to VSI list"
8930 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8932 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8933 tmp_fltr.fwd_id.hw_vsi_id, status);
8936 fm_list->vsi_list_info->ref_cnt--;
8938 /* Remove the VSI list since it is no longer used */
8939 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8941 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8942 vsi_list_id, status);
/* drop the list-map book-keeping for the removed VSI list */
8946 LIST_DEL(&vsi_list_info->list_entry);
8947 ice_free(hw, vsi_list_info);
8948 fm_list->vsi_list_info = NULL;
8955 * ice_rem_adv_rule - removes existing advanced switch rule
8956 * @hw: pointer to the hardware structure
8957 * @lkups: information on the words that needs to be looked up. All words
8958 * together makes one recipe
8959 * @lkups_cnt: num of entries in the lkups array
8960 * @rinfo: Its the pointer to the rule information for the rule
8962 * This function can be used to remove 1 rule at a time. The lkups is
8963 * used to describe all the words that forms the "lookup" portion of the
8964 * rule. These words can span multiple protocols. Callers to this function
8965 * need to pass in a list of protocol headers with lookup information along
8966 * and mask that determines which words are valid from the given protocol
8967 * header. rinfo describes other information related to this rule such as
8968 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): intermediate lines are elided in this extraction; comments
 * only are added.
 */
8971 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8972 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8974 struct ice_adv_fltr_mgmt_list_entry *list_elem;
8975 struct ice_prot_lkup_ext lkup_exts;
8976 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8977 enum ice_status status = ICE_SUCCESS;
8978 bool remove_rule = false;
8979 u16 i, rid, vsi_handle;
/* rebuild the extraction-word view of the lookups to locate the recipe */
8981 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8982 for (i = 0; i < lkups_cnt; i++) {
8985 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8988 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8993 /* Create any special protocol/offset pairs, such as looking at tunnel
8994 * bits by extracting metadata
8996 status = ice_add_special_words(rinfo, &lkup_exts);
9000 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
9001 /* If did not find a recipe that match the existing criteria */
9002 if (rid == ICE_MAX_NUM_RECIPES)
9003 return ICE_ERR_PARAM;
9005 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9006 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9007 /* the rule is already removed */
9010 ice_acquire_lock(rule_lock);
/* decide between full removal and VSI-list shrink under the lock */
9011 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9013 } else if (list_elem->vsi_count > 1) {
/* more than one subscriber: just drop this VSI from the list */
9014 remove_rule = false;
9015 vsi_handle = rinfo->sw_act.vsi_handle;
9016 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9018 vsi_handle = rinfo->sw_act.vsi_handle;
9019 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9021 ice_release_lock(rule_lock);
9024 if (list_elem->vsi_count == 0)
9027 ice_release_lock(rule_lock);
9029 struct ice_aqc_sw_rules_elem *s_rule;
/* issue the AQ remove for the whole rule */
9032 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9033 s_rule = (struct ice_aqc_sw_rules_elem *)
9034 ice_malloc(hw, rule_buf_sz);
9036 return ICE_ERR_NO_MEMORY;
9037 s_rule->pdata.lkup_tx_rx.act = 0;
9038 s_rule->pdata.lkup_tx_rx.index =
9039 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9040 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9041 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9043 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is treated as success: the rule is gone either way */
9044 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9045 struct ice_switch_info *sw = hw->switch_info;
9047 ice_acquire_lock(rule_lock);
9048 LIST_DEL(&list_elem->list_entry);
9049 ice_free(hw, list_elem->lkups);
9050 ice_free(hw, list_elem);
9051 ice_release_lock(rule_lock);
9052 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9053 sw->recp_list[rid].adv_rule = false;
9055 ice_free(hw, s_rule);
9061 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9062 * @hw: pointer to the hardware structure
9063 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9065 * This function is used to remove 1 rule at a time. The removal is based on
9066 * the remove_entry parameter. This function will remove rule for a given
9067 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9070 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9071 struct ice_rule_query_data *remove_entry)
9073 struct ice_adv_fltr_mgmt_list_entry *list_itr;
9074 struct LIST_HEAD_TYPE *list_head;
9075 struct ice_adv_rule_info rinfo;
9076 struct ice_switch_info *sw;
9078 sw = hw->switch_info;
9079 if (!sw->recp_list[remove_entry->rid].recp_created)
9080 return ICE_ERR_PARAM;
9081 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9082 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9084 if (list_itr->rule_info.fltr_rule_id ==
9085 remove_entry->rule_id) {
9086 rinfo = list_itr->rule_info;
9087 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9088 return ice_rem_adv_rule(hw, list_itr->lkups,
9089 list_itr->lkups_cnt, &rinfo);
9092 /* either list is empty or unable to find rule */
9093 return ICE_ERR_DOES_NOT_EXIST;
9097 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9099 * @hw: pointer to the hardware structure
9100 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9102 * This function is used to remove all the rules for a given VSI and as soon
9103 * as removing a rule fails, it will return immediately with the error code,
9104 * else it will return ICE_SUCCESS
9106 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9108 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9109 struct ice_vsi_list_map_info *map_info;
9110 struct LIST_HEAD_TYPE *list_head;
9111 struct ice_adv_rule_info rinfo;
9112 struct ice_switch_info *sw;
9113 enum ice_status status;
9116 sw = hw->switch_info;
9117 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9118 if (!sw->recp_list[rid].recp_created)
9120 if (!sw->recp_list[rid].adv_rule)
9123 list_head = &sw->recp_list[rid].filt_rules;
9124 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9125 ice_adv_fltr_mgmt_list_entry,
9127 rinfo = list_itr->rule_info;
9129 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9130 map_info = list_itr->vsi_list_info;
9134 if (!ice_is_bit_set(map_info->vsi_map,
9137 } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9141 rinfo.sw_act.vsi_handle = vsi_handle;
9142 status = ice_rem_adv_rule(hw, list_itr->lkups,
9143 list_itr->lkups_cnt, &rinfo);
9153 * ice_replay_fltr - Replay all the filters stored by a specific list head
9154 * @hw: pointer to the hardware structure
9155 * @list_head: list for which filters needs to be replayed
9156 * @recp_id: Recipe ID for which rules need to be replayed
9158 static enum ice_status
9159 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9161 struct ice_fltr_mgmt_list_entry *itr;
9162 enum ice_status status = ICE_SUCCESS;
9163 struct ice_sw_recipe *recp_list;
9164 u8 lport = hw->port_info->lport;
9165 struct LIST_HEAD_TYPE l_head;
9167 if (LIST_EMPTY(list_head))
9170 recp_list = &hw->switch_info->recp_list[recp_id];
9171 /* Move entries from the given list_head to a temporary l_head so that
9172 * they can be replayed. Otherwise when trying to re-add the same
9173 * filter, the function will return already exists
9175 LIST_REPLACE_INIT(list_head, &l_head);
9177 /* Mark the given list_head empty by reinitializing it so filters
9178 * could be added again by *handler
9180 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9182 struct ice_fltr_list_entry f_entry;
9185 f_entry.fltr_info = itr->fltr_info;
9186 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9187 status = ice_add_rule_internal(hw, recp_list, lport,
9189 if (status != ICE_SUCCESS)
9194 /* Add a filter per VSI separately */
9195 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9197 if (!ice_is_vsi_valid(hw, vsi_handle))
9200 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9201 f_entry.fltr_info.vsi_handle = vsi_handle;
9202 f_entry.fltr_info.fwd_id.hw_vsi_id =
9203 ice_get_hw_vsi_num(hw, vsi_handle);
9204 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9205 if (recp_id == ICE_SW_LKUP_VLAN)
9206 status = ice_add_vlan_internal(hw, recp_list,
9209 status = ice_add_rule_internal(hw, recp_list,
9212 if (status != ICE_SUCCESS)
9217 /* Clear the filter management list */
9218 ice_rem_sw_rule_info(hw, &l_head);
9223 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9224 * @hw: pointer to the hardware structure
9226 * NOTE: This function does not clean up partially added filters on error.
9227 * It is up to caller of the function to issue a reset or fail early.
9229 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9231 struct ice_switch_info *sw = hw->switch_info;
9232 enum ice_status status = ICE_SUCCESS;
9235 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9236 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9238 status = ice_replay_fltr(hw, i, head);
9239 if (status != ICE_SUCCESS)
9246 * ice_replay_vsi_fltr - Replay filters for requested VSI
9247 * @hw: pointer to the hardware structure
9248 * @pi: pointer to port information structure
9249 * @sw: pointer to switch info struct for which function replays filters
9250 * @vsi_handle: driver VSI handle
9251 * @recp_id: Recipe ID for which rules need to be replayed
9252 * @list_head: list for which filters need to be replayed
9254 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9255 * It is required to pass valid VSI handle.
9257 static enum ice_status
9258 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9259 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9260 struct LIST_HEAD_TYPE *list_head)
9262 struct ice_fltr_mgmt_list_entry *itr;
9263 enum ice_status status = ICE_SUCCESS;
9264 struct ice_sw_recipe *recp_list;
9267 if (LIST_EMPTY(list_head))
9269 recp_list = &sw->recp_list[recp_id];
9270 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9272 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9274 struct ice_fltr_list_entry f_entry;
9276 f_entry.fltr_info = itr->fltr_info;
9277 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9278 itr->fltr_info.vsi_handle == vsi_handle) {
9279 /* update the src in case it is VSI num */
9280 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9281 f_entry.fltr_info.src = hw_vsi_id;
9282 status = ice_add_rule_internal(hw, recp_list,
9285 if (status != ICE_SUCCESS)
9289 if (!itr->vsi_list_info ||
9290 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9292 /* Clearing it so that the logic can add it back */
9293 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9294 f_entry.fltr_info.vsi_handle = vsi_handle;
9295 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9296 /* update the src in case it is VSI num */
9297 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9298 f_entry.fltr_info.src = hw_vsi_id;
9299 if (recp_id == ICE_SW_LKUP_VLAN)
9300 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9302 status = ice_add_rule_internal(hw, recp_list,
9305 if (status != ICE_SUCCESS)
9313 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9314 * @hw: pointer to the hardware structure
9315 * @vsi_handle: driver VSI handle
9316 * @list_head: list for which filters need to be replayed
9318 * Replay the advanced rule for the given VSI.
9320 static enum ice_status
9321 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9322 struct LIST_HEAD_TYPE *list_head)
9324 struct ice_rule_query_data added_entry = { 0 };
9325 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9326 enum ice_status status = ICE_SUCCESS;
9328 if (LIST_EMPTY(list_head))
9330 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9332 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9333 u16 lk_cnt = adv_fltr->lkups_cnt;
9335 if (vsi_handle != rinfo->sw_act.vsi_handle)
9337 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9346 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9347 * @hw: pointer to the hardware structure
9348 * @pi: pointer to port information structure
9349 * @vsi_handle: driver VSI handle
9351 * Replays filters for requested VSI via vsi_handle.
9354 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9357 struct ice_switch_info *sw = hw->switch_info;
9358 enum ice_status status;
9361 /* Update the recipes that were created */
9362 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9363 struct LIST_HEAD_TYPE *head;
9365 head = &sw->recp_list[i].filt_replay_rules;
9366 if (!sw->recp_list[i].adv_rule)
9367 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9370 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9371 if (status != ICE_SUCCESS)
9379 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9380 * @hw: pointer to the HW struct
9381 * @sw: pointer to switch info struct for which function removes filters
9383 * Deletes the filter replay rules for given switch
9385 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9392 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9393 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9394 struct LIST_HEAD_TYPE *l_head;
9396 l_head = &sw->recp_list[i].filt_replay_rules;
9397 if (!sw->recp_list[i].adv_rule)
9398 ice_rem_sw_rule_info(hw, l_head);
9400 ice_rem_adv_rule_info(hw, l_head);
9406 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9407 * @hw: pointer to the HW struct
9409 * Deletes the filter replay rules.
9411 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9413 ice_rm_sw_replay_rule_info(hw, hw->switch_info);