1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
541 { ICE_IPV4_OFOS, 14 },
545 { ICE_PROTOCOL_LAST, 0 },
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 0x00, 0x00, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
554 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 0x00, 0x00, 0x40, 0x00,
556 0x40, 0x11, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 0x00, 0x00, 0x00, 0x00,
563 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x85,
567 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 0x00, 0x00, 0x00, 0x00,
570 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 0x00, 0x00, 0x40, 0x00,
572 0x40, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
581 { ICE_IPV4_OFOS, 14 },
585 { ICE_PROTOCOL_LAST, 0 },
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
594 0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 0x00, 0x00, 0x40, 0x00,
596 0x40, 0x11, 0x00, 0x00,
597 0x00, 0x00, 0x00, 0x00,
598 0x00, 0x00, 0x00, 0x00,
600 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 0x00, 0x00, 0x00, 0x00,
603 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x85,
607 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 0x00, 0x00, 0x00, 0x00,
610 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 0x00, 0x00, 0x3b, 0x00,
612 0x00, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
614 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
627 { ICE_IPV6_OFOS, 14 },
631 { ICE_PROTOCOL_LAST, 0 },
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x00, 0x00, 0x00,
640 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 0x00, 0x00, 0x00, 0x00,
643 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
651 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 0x00, 0x00, 0x00, 0x00,
654 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x85,
658 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 0x00, 0x00, 0x00, 0x00,
661 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 0x00, 0x00, 0x40, 0x00,
663 0x40, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00,
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
673 { ICE_IPV6_OFOS, 14 },
677 { ICE_PROTOCOL_LAST, 0 },
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
686 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00,
691 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00,
693 0x00, 0x00, 0x00, 0x00,
694 0x00, 0x00, 0x00, 0x00,
695 0x00, 0x00, 0x00, 0x00,
697 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 0x00, 0x00, 0x00, 0x00,
700 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 62 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x85,
704 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 0x00, 0x00, 0x00, 0x00,
707 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708 0x00, 0x00, 0x3b, 0x00,
709 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00,
713 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00,
715 0x00, 0x00, 0x00, 0x00,
716 0x00, 0x00, 0x00, 0x00,
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
724 { ICE_IPV4_OFOS, 14 },
726 { ICE_GTP_NO_PAY, 42 },
727 { ICE_PROTOCOL_LAST, 0 },
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
733 { ICE_IPV6_OFOS, 14 },
735 { ICE_GTP_NO_PAY, 62 },
736 { ICE_PROTOCOL_LAST, 0 },
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
741 { ICE_ETYPE_OL, 12 },
742 { ICE_VLAN_OFOS, 14},
744 { ICE_PROTOCOL_LAST, 0 },
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
749 { ICE_ETYPE_OL, 12 },
750 { ICE_VLAN_OFOS, 14},
752 { ICE_IPV4_OFOS, 26 },
753 { ICE_PROTOCOL_LAST, 0 },
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 0x00, 0x00, 0x00, 0x00,
759 0x00, 0x00, 0x00, 0x00,
761 0x81, 0x00, /* ICE_ETYPE_OL 12 */
763 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
765 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
768 0x00, 0x21, /* PPP Link Layer 24 */
770 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
782 { ICE_ETYPE_OL, 12 },
783 { ICE_VLAN_OFOS, 14},
785 { ICE_IPV4_OFOS, 26 },
787 { ICE_PROTOCOL_LAST, 0 },
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 0x00, 0x00, 0x00, 0x00,
793 0x00, 0x00, 0x00, 0x00,
795 0x81, 0x00, /* ICE_ETYPE_OL 12 */
797 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
799 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
802 0x00, 0x21, /* PPP Link Layer 24 */
804 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 0x00, 0x01, 0x00, 0x00,
806 0x00, 0x06, 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 0x00, 0x00, 0x00, 0x00,
812 0x00, 0x00, 0x00, 0x00,
813 0x50, 0x00, 0x00, 0x00,
814 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
822 { ICE_ETYPE_OL, 12 },
823 { ICE_VLAN_OFOS, 14},
825 { ICE_IPV4_OFOS, 26 },
826 { ICE_UDP_ILOS, 46 },
827 { ICE_PROTOCOL_LAST, 0 },
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 0x00, 0x00, 0x00, 0x00,
833 0x00, 0x00, 0x00, 0x00,
835 0x81, 0x00, /* ICE_ETYPE_OL 12 */
837 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
839 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
842 0x00, 0x21, /* PPP Link Layer 24 */
844 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 0x00, 0x01, 0x00, 0x00,
846 0x00, 0x11, 0x00, 0x00,
847 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 0x00, 0x08, 0x00, 0x00,
853 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
858 { ICE_ETYPE_OL, 12 },
859 { ICE_VLAN_OFOS, 14},
861 { ICE_IPV6_OFOS, 26 },
862 { ICE_PROTOCOL_LAST, 0 },
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 0x00, 0x00, 0x00, 0x00,
868 0x00, 0x00, 0x00, 0x00,
870 0x81, 0x00, /* ICE_ETYPE_OL 12 */
872 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
874 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
877 0x00, 0x57, /* PPP Link Layer 24 */
879 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 0x00, 0x00, 0x3b, 0x00,
881 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, 0x00, 0x00,
885 0x00, 0x00, 0x00, 0x00,
886 0x00, 0x00, 0x00, 0x00,
887 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
896 { ICE_ETYPE_OL, 12 },
897 { ICE_VLAN_OFOS, 14},
899 { ICE_IPV6_OFOS, 26 },
901 { ICE_PROTOCOL_LAST, 0 },
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x81, 0x00, /* ICE_ETYPE_OL 12 */
911 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
913 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
916 0x00, 0x57, /* PPP Link Layer 24 */
918 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
924 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
927 0x00, 0x00, 0x00, 0x00,
929 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00,
932 0x50, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
941 { ICE_ETYPE_OL, 12 },
942 { ICE_VLAN_OFOS, 14},
944 { ICE_IPV6_OFOS, 26 },
945 { ICE_UDP_ILOS, 66 },
946 { ICE_PROTOCOL_LAST, 0 },
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
954 0x81, 0x00, /* ICE_ETYPE_OL 12 */
956 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
958 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
961 0x00, 0x57, /* PPP Link Layer 24 */
963 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
967 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00,
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 0x00, 0x08, 0x00, 0x00,
977 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
982 { ICE_IPV4_OFOS, 14 },
984 { ICE_PROTOCOL_LAST, 0 },
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
993 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 0x00, 0x00, 0x40, 0x00,
995 0x40, 0x32, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
997 0x00, 0x00, 0x00, 0x00,
999 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 { ICE_MAC_OFOS, 0 },
1006 { ICE_IPV6_OFOS, 14 },
1008 { ICE_PROTOCOL_LAST, 0 },
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 0x00, 0x00, 0x00, 0x00,
1014 0x00, 0x00, 0x00, 0x00,
1017 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00,
1025 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 { ICE_MAC_OFOS, 0 },
1035 { ICE_IPV4_OFOS, 14 },
1037 { ICE_PROTOCOL_LAST, 0 },
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00,
1046 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 0x00, 0x00, 0x40, 0x00,
1048 0x40, 0x33, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 { ICE_MAC_OFOS, 0 },
1060 { ICE_IPV6_OFOS, 14 },
1062 { ICE_PROTOCOL_LAST, 0 },
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 0x00, 0x00, 0x00, 0x00,
1068 0x00, 0x00, 0x00, 0x00,
1071 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1077 0x00, 0x00, 0x00, 0x00,
1078 0x00, 0x00, 0x00, 0x00,
1079 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 0x00, 0x00, 0x00, 0x00,
1084 0x00, 0x00, 0x00, 0x00,
1085 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 { ICE_MAC_OFOS, 0 },
1090 { ICE_IPV4_OFOS, 14 },
1091 { ICE_UDP_ILOS, 34 },
1093 { ICE_PROTOCOL_LAST, 0 },
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1102 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 0x00, 0x00, 0x40, 0x00,
1104 0x40, 0x11, 0x00, 0x00,
1105 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00,
1108 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 0x00, 0x00, 0x00, 0x00,
1111 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00,
1113 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 { ICE_MAC_OFOS, 0 },
1118 { ICE_IPV6_OFOS, 14 },
1119 { ICE_UDP_ILOS, 54 },
1121 { ICE_PROTOCOL_LAST, 0 },
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 0x00, 0x00, 0x00, 0x00,
1127 0x00, 0x00, 0x00, 0x00,
1130 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 0x00, 0x00, 0x00, 0x00,
1133 0x00, 0x00, 0x00, 0x00,
1134 0x00, 0x00, 0x00, 0x00,
1135 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00,
1137 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1146 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_IPV4_OFOS, 14 },
1154 { ICE_PROTOCOL_LAST, 0 },
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00,
1163 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 0x00, 0x00, 0x40, 0x00,
1165 0x40, 0x73, 0x00, 0x00,
1166 0x00, 0x00, 0x00, 0x00,
1167 0x00, 0x00, 0x00, 0x00,
1169 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00,
1172 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 { ICE_MAC_OFOS, 0 },
1177 { ICE_IPV6_OFOS, 14 },
1179 { ICE_PROTOCOL_LAST, 0 },
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 0x00, 0x00, 0x00, 0x00,
1185 0x00, 0x00, 0x00, 0x00,
1188 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 0x00, 0x0c, 0x73, 0x40,
1190 0x00, 0x00, 0x00, 0x00,
1191 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00,
1193 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1196 0x00, 0x00, 0x00, 0x00,
1197 0x00, 0x00, 0x00, 0x00,
1199 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 0x00, 0x00, 0x00, 0x00,
1201 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1205 /* this is a recipe to profile association bitmap */
1206 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1207 ICE_MAX_NUM_PROFILES);
1209 /* this is a profile to recipe association bitmap */
1210 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1211 ICE_MAX_NUM_RECIPES);
1213 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1216 * ice_collect_result_idx - copy result index values
1217 * @buf: buffer that contains the result index
1218 * @recp: the recipe struct to copy data into
1220 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1221 struct ice_sw_recipe *recp)
1223 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1224 ice_set_bit(buf->content.result_indx &
1225 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1229 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1230 * @rid: recipe ID that we are populating
1232 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1234 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1235 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1236 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1237 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1238 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1239 u16 i, j, profile_num = 0;
1240 bool non_tun_valid = false;
1241 bool pppoe_valid = false;
1242 bool vxlan_valid = false;
1243 bool gre_valid = false;
1244 bool gtp_valid = false;
1245 bool flag_valid = false;
1247 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1248 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1253 for (i = 0; i < 12; i++) {
1254 if (gre_profile[i] == j)
1258 for (i = 0; i < 12; i++) {
1259 if (vxlan_profile[i] == j)
1263 for (i = 0; i < 7; i++) {
1264 if (pppoe_profile[i] == j)
1268 for (i = 0; i < 6; i++) {
1269 if (non_tun_profile[i] == j)
1270 non_tun_valid = true;
1273 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1274 j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1277 if ((j >= ICE_PROFID_IPV4_ESP &&
1278 j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1279 (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1280 j <= ICE_PROFID_IPV6_GTPU_TEID))
1284 if (!non_tun_valid && vxlan_valid)
1285 tun_type = ICE_SW_TUN_VXLAN;
1286 else if (!non_tun_valid && gre_valid)
1287 tun_type = ICE_SW_TUN_NVGRE;
1288 else if (!non_tun_valid && pppoe_valid)
1289 tun_type = ICE_SW_TUN_PPPOE;
1290 else if (!non_tun_valid && gtp_valid)
1291 tun_type = ICE_SW_TUN_GTP;
1292 else if (non_tun_valid &&
1293 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1294 tun_type = ICE_SW_TUN_AND_NON_TUN;
1295 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1297 tun_type = ICE_NON_TUN;
1299 tun_type = ICE_NON_TUN;
1301 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1302 i = ice_is_bit_set(recipe_to_profile[rid],
1303 ICE_PROFID_PPPOE_IPV4_OTHER);
1304 j = ice_is_bit_set(recipe_to_profile[rid],
1305 ICE_PROFID_PPPOE_IPV6_OTHER);
1307 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1309 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1312 if (tun_type == ICE_SW_TUN_GTP) {
1313 if (ice_is_bit_set(recipe_to_profile[rid],
1314 ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1315 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1316 else if (ice_is_bit_set(recipe_to_profile[rid],
1317 ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1318 tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1319 else if (ice_is_bit_set(recipe_to_profile[rid],
1320 ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1321 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1322 else if (ice_is_bit_set(recipe_to_profile[rid],
1323 ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1324 tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1327 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1328 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1329 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1331 case ICE_PROFID_IPV4_TCP:
1332 tun_type = ICE_SW_IPV4_TCP;
1334 case ICE_PROFID_IPV4_UDP:
1335 tun_type = ICE_SW_IPV4_UDP;
1337 case ICE_PROFID_IPV6_TCP:
1338 tun_type = ICE_SW_IPV6_TCP;
1340 case ICE_PROFID_IPV6_UDP:
1341 tun_type = ICE_SW_IPV6_UDP;
1343 case ICE_PROFID_PPPOE_PAY:
1344 tun_type = ICE_SW_TUN_PPPOE_PAY;
1346 case ICE_PROFID_PPPOE_IPV4_TCP:
1347 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1349 case ICE_PROFID_PPPOE_IPV4_UDP:
1350 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1352 case ICE_PROFID_PPPOE_IPV4_OTHER:
1353 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1355 case ICE_PROFID_PPPOE_IPV6_TCP:
1356 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1358 case ICE_PROFID_PPPOE_IPV6_UDP:
1359 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1361 case ICE_PROFID_PPPOE_IPV6_OTHER:
1362 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1364 case ICE_PROFID_IPV4_ESP:
1365 tun_type = ICE_SW_TUN_IPV4_ESP;
1367 case ICE_PROFID_IPV6_ESP:
1368 tun_type = ICE_SW_TUN_IPV6_ESP;
1370 case ICE_PROFID_IPV4_AH:
1371 tun_type = ICE_SW_TUN_IPV4_AH;
1373 case ICE_PROFID_IPV6_AH:
1374 tun_type = ICE_SW_TUN_IPV6_AH;
1376 case ICE_PROFID_IPV4_NAT_T:
1377 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1379 case ICE_PROFID_IPV6_NAT_T:
1380 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1382 case ICE_PROFID_IPV4_PFCP_NODE:
1384 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1386 case ICE_PROFID_IPV6_PFCP_NODE:
1388 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1390 case ICE_PROFID_IPV4_PFCP_SESSION:
1392 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1394 case ICE_PROFID_IPV6_PFCP_SESSION:
1396 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1398 case ICE_PROFID_MAC_IPV4_L2TPV3:
1399 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1401 case ICE_PROFID_MAC_IPV6_L2TPV3:
1402 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1404 case ICE_PROFID_IPV4_GTPU_TEID:
1405 tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1407 case ICE_PROFID_IPV6_GTPU_TEID:
1408 tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1423 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1424 * @hw: pointer to hardware structure
1425 * @recps: struct that we need to populate
1426 * @rid: recipe ID that we are populating
1427 * @refresh_required: true if we should get recipe to profile mapping from FW
1429 * This function is used to populate all the necessary entries into our
1430 * bookkeeping so that we have a current list of all the recipes that are
1431 * programmed in the firmware.
1433 static enum ice_status
1434 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1435 bool *refresh_required)
1437 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1438 struct ice_aqc_recipe_data_elem *tmp;
1439 u16 num_recps = ICE_MAX_NUM_RECIPES;
1440 struct ice_prot_lkup_ext *lkup_exts;
1441 enum ice_status status;
1445 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1447 /* we need a buffer big enough to accommodate all the recipes */
1448 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1449 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1451 return ICE_ERR_NO_MEMORY;
1453 tmp[0].recipe_indx = rid;
1454 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1455 /* non-zero status meaning recipe doesn't exist */
1459 /* Get recipe to profile map so that we can get the fv from lkups that
1460 * we read for a recipe from FW. Since we want to minimize the number of
1461 * times we make this FW call, just make one call and cache the copy
1462 * until a new recipe is added. This operation is only required the
1463 * first time to get the changes from FW. Then to search existing
1464 * entries we don't need to update the cache again until another recipe
1467 if (*refresh_required) {
1468 ice_get_recp_to_prof_map(hw);
1469 *refresh_required = false;
1472 /* Start populating all the entries for recps[rid] based on lkups from
1473 * firmware. Note that we are only creating the root recipe in our
1476 lkup_exts = &recps[rid].lkup_exts;
1478 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1479 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1480 struct ice_recp_grp_entry *rg_entry;
1481 u8 i, prof, idx, prot = 0;
1485 rg_entry = (struct ice_recp_grp_entry *)
1486 ice_malloc(hw, sizeof(*rg_entry));
1488 status = ICE_ERR_NO_MEMORY;
1492 idx = root_bufs.recipe_indx;
1493 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1495 /* Mark all result indices in this chain */
1496 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1497 ice_set_bit(root_bufs.content.result_indx &
1498 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1500 /* get the first profile that is associated with rid */
1501 prof = ice_find_first_bit(recipe_to_profile[idx],
1502 ICE_MAX_NUM_PROFILES);
1503 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1504 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1506 rg_entry->fv_idx[i] = lkup_indx;
1507 rg_entry->fv_mask[i] =
1508 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1510 /* If the recipe is a chained recipe then all its
1511 * child recipe's result will have a result index.
1512 * To fill fv_words we should not use those result
1513 * index, we only need the protocol ids and offsets.
1514 * We will skip all the fv_idx which stores result
1515 * index in them. We also need to skip any fv_idx which
1516 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1517 * valid offset value.
1519 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1520 rg_entry->fv_idx[i]) ||
1521 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1522 rg_entry->fv_idx[i] == 0)
1525 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1526 rg_entry->fv_idx[i], &prot, &off);
1527 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1528 lkup_exts->fv_words[fv_word_idx].off = off;
1529 lkup_exts->field_mask[fv_word_idx] =
1530 rg_entry->fv_mask[i];
1533 /* populate rg_list with the data from the child entry of this
1536 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1538 /* Propagate some data to the recipe database */
1539 recps[idx].is_root = !!is_root;
1540 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1541 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1542 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1543 recps[idx].chain_idx = root_bufs.content.result_indx &
1544 ~ICE_AQ_RECIPE_RESULT_EN;
1545 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1547 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1553 /* Only do the following for root recipes entries */
1554 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1555 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1556 recps[idx].root_rid = root_bufs.content.rid &
1557 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1558 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1561 /* Complete initialization of the root recipe entry */
1562 lkup_exts->n_val_words = fv_word_idx;
1563 recps[rid].big_recp = (num_recps > 1);
1564 recps[rid].n_grp_count = (u8)num_recps;
1565 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1566 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1567 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1568 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1569 if (!recps[rid].root_buf)
1572 /* Copy result indexes */
1573 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1574 recps[rid].recp_created = true;
1582 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1583 * @hw: pointer to hardware structure
1585 * This function is used to populate recipe_to_profile matrix where index to
1586 * this array is the recipe ID and the element is the mapping of which profiles
1587 * is this recipe mapped to.
1589 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1591 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1594 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1597 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1598 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1599 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1601 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1602 ICE_MAX_NUM_RECIPES);
1603 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1604 ice_set_bit(i, recipe_to_profile[j]);
1609 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1610 * @hw: pointer to the HW struct
1611 * @recp_list: pointer to sw recipe list
1613 * Allocate memory for the entire recipe table and initialize the structures/
1614 * entries corresponding to basic recipes.
1617 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1619 struct ice_sw_recipe *recps;
1622 recps = (struct ice_sw_recipe *)
1623 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1625 return ICE_ERR_NO_MEMORY;
1627 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1628 recps[i].root_rid = i;
1629 INIT_LIST_HEAD(&recps[i].filt_rules);
1630 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1631 INIT_LIST_HEAD(&recps[i].rg_list);
1632 ice_init_lock(&recps[i].filt_rule_lock);
1641 * ice_aq_get_sw_cfg - get switch configuration
1642 * @hw: pointer to the hardware structure
1643 * @buf: pointer to the result buffer
1644 * @buf_size: length of the buffer available for response
1645 * @req_desc: pointer to requested descriptor
1646 * @num_elems: pointer to number of elements
1647 * @cd: pointer to command details structure or NULL
1649 * Get switch configuration (0x0200) to be placed in buf.
1650 * This admin command returns information such as initial VSI/port number
1651 * and switch ID it belongs to.
1653 * NOTE: *req_desc is both an input/output parameter.
1654 * The caller of this function first calls this function with *request_desc set
1655 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1656 * configuration information has been returned; if non-zero (meaning not all
1657 * the information was returned), the caller should call this function again
1658 * with *req_desc set to the previous value returned by f/w to get the
1659 * next block of switch configuration information.
1661 * *num_elems is output only parameter. This reflects the number of elements
1662 * in response buffer. The caller of this function to use *num_elems while
1663 * parsing the response buffer.
1665 static enum ice_status
1666 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1667 u16 buf_size, u16 *req_desc, u16 *num_elems,
1668 struct ice_sq_cd *cd)
1670 struct ice_aqc_get_sw_cfg *cmd;
1671 struct ice_aq_desc desc;
1672 enum ice_status status;
1674 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1675 cmd = &desc.params.get_sw_conf;
1676 cmd->element = CPU_TO_LE16(*req_desc);
1678 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1680 *req_desc = LE16_TO_CPU(cmd->element);
1681 *num_elems = LE16_TO_CPU(cmd->num_elems);
1688 * ice_alloc_sw - allocate resources specific to switch
1689 * @hw: pointer to the HW struct
1690 * @ena_stats: true to turn on VEB stats
1691 * @shared_res: true for shared resource, false for dedicated resource
1692 * @sw_id: switch ID returned
1693 * @counter_id: VEB counter ID returned
1695 * allocates switch resources (SWID and VEB counter) (0x0208)
1698 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1701 struct ice_aqc_alloc_free_res_elem *sw_buf;
1702 struct ice_aqc_res_elem *sw_ele;
1703 enum ice_status status;
1706 buf_len = ice_struct_size(sw_buf, elem, 1);
1707 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1709 return ICE_ERR_NO_MEMORY;
1711 /* Prepare buffer for switch ID.
1712 * The number of resource entries in buffer is passed as 1 since only a
1713 * single switch/VEB instance is allocated, and hence a single sw_id
1716 sw_buf->num_elems = CPU_TO_LE16(1);
1718 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1719 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1720 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1722 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1723 ice_aqc_opc_alloc_res, NULL);
1726 goto ice_alloc_sw_exit;
1728 sw_ele = &sw_buf->elem[0];
1729 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1732 /* Prepare buffer for VEB Counter */
1733 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1734 struct ice_aqc_alloc_free_res_elem *counter_buf;
1735 struct ice_aqc_res_elem *counter_ele;
1737 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1738 ice_malloc(hw, buf_len);
1740 status = ICE_ERR_NO_MEMORY;
1741 goto ice_alloc_sw_exit;
1744 /* The number of resource entries in buffer is passed as 1 since
1745 * only a single switch/VEB instance is allocated, and hence a
1746 * single VEB counter is requested.
1748 counter_buf->num_elems = CPU_TO_LE16(1);
1749 counter_buf->res_type =
1750 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1751 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1752 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1756 ice_free(hw, counter_buf);
1757 goto ice_alloc_sw_exit;
1759 counter_ele = &counter_buf->elem[0];
1760 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1761 ice_free(hw, counter_buf);
1765 ice_free(hw, sw_buf);
1770 * ice_free_sw - free resources specific to switch
1771 * @hw: pointer to the HW struct
1772 * @sw_id: switch ID returned
1773 * @counter_id: VEB counter ID returned
1775 * free switch resources (SWID and VEB counter) (0x0209)
1777 * NOTE: This function frees multiple resources. It continues
1778 * releasing other resources even after it encounters error.
1779 * The error code returned is the last error it encountered.
1781 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1783 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1784 enum ice_status status, ret_status;
1787 buf_len = ice_struct_size(sw_buf, elem, 1);
1788 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1790 return ICE_ERR_NO_MEMORY;
1792 /* Prepare buffer to free for switch ID res.
1793 * The number of resource entries in buffer is passed as 1 since only a
1794 * single switch/VEB instance is freed, and hence a single sw_id
1797 sw_buf->num_elems = CPU_TO_LE16(1);
1798 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1799 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1801 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802 ice_aqc_opc_free_res, NULL);
1805 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1807 /* Prepare buffer to free for VEB Counter resource */
1808 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1809 ice_malloc(hw, buf_len);
1811 ice_free(hw, sw_buf);
1812 return ICE_ERR_NO_MEMORY;
1815 /* The number of resource entries in buffer is passed as 1 since only a
1816 * single switch/VEB instance is freed, and hence a single VEB counter
1819 counter_buf->num_elems = CPU_TO_LE16(1);
1820 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1821 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1823 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1824 ice_aqc_opc_free_res, NULL);
1826 ice_debug(hw, ICE_DBG_SW,
1827 "VEB counter resource could not be freed\n");
1828 ret_status = status;
1831 ice_free(hw, counter_buf);
1832 ice_free(hw, sw_buf);
1838 * @hw: pointer to the HW struct
1839 * @vsi_ctx: pointer to a VSI context struct
1840 * @cd: pointer to command details structure or NULL
1842 * Add a VSI context to the hardware (0x0210)
1845 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1846 struct ice_sq_cd *cd)
1848 struct ice_aqc_add_update_free_vsi_resp *res;
1849 struct ice_aqc_add_get_update_free_vsi *cmd;
1850 struct ice_aq_desc desc;
1851 enum ice_status status;
1853 cmd = &desc.params.vsi_cmd;
1854 res = &desc.params.add_update_free_vsi_res;
1856 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1858 if (!vsi_ctx->alloc_from_pool)
1859 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1860 ICE_AQ_VSI_IS_VALID);
1862 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1864 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1866 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1867 sizeof(vsi_ctx->info), cd);
1870 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1871 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1872 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1880 * @hw: pointer to the HW struct
1881 * @vsi_ctx: pointer to a VSI context struct
1882 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1883 * @cd: pointer to command details structure or NULL
1885 * Free VSI context info from hardware (0x0213)
1888 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1889 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1891 struct ice_aqc_add_update_free_vsi_resp *resp;
1892 struct ice_aqc_add_get_update_free_vsi *cmd;
1893 struct ice_aq_desc desc;
1894 enum ice_status status;
1896 cmd = &desc.params.vsi_cmd;
1897 resp = &desc.params.add_update_free_vsi_res;
1899 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1901 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1903 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1905 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1907 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1908 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1916 * @hw: pointer to the HW struct
1917 * @vsi_ctx: pointer to a VSI context struct
1918 * @cd: pointer to command details structure or NULL
1920 * Update VSI context in the hardware (0x0211)
1923 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1924 struct ice_sq_cd *cd)
1926 struct ice_aqc_add_update_free_vsi_resp *resp;
1927 struct ice_aqc_add_get_update_free_vsi *cmd;
1928 struct ice_aq_desc desc;
1929 enum ice_status status;
1931 cmd = &desc.params.vsi_cmd;
1932 resp = &desc.params.add_update_free_vsi_res;
1934 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1936 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1938 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1940 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1941 sizeof(vsi_ctx->info), cd);
1944 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1945 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1952 * ice_is_vsi_valid - check whether the VSI is valid or not
1953 * @hw: pointer to the HW struct
1954 * @vsi_handle: VSI handle
1956 * check whether the VSI is valid or not
1958 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1960 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1964 * ice_get_hw_vsi_num - return the HW VSI number
1965 * @hw: pointer to the HW struct
1966 * @vsi_handle: VSI handle
1968 * return the HW VSI number
1969 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1971 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1973 return hw->vsi_ctx[vsi_handle]->vsi_num;
1977 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1978 * @hw: pointer to the HW struct
1979 * @vsi_handle: VSI handle
1981 * return the VSI context entry for a given VSI handle
1983 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1985 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1989 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1990 * @hw: pointer to the HW struct
1991 * @vsi_handle: VSI handle
1992 * @vsi: VSI context pointer
1994 * save the VSI context entry for a given VSI handle
1997 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1999 hw->vsi_ctx[vsi_handle] = vsi;
2003 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2004 * @hw: pointer to the HW struct
2005 * @vsi_handle: VSI handle
2007 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2009 struct ice_vsi_ctx *vsi;
2012 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2015 ice_for_each_traffic_class(i) {
2016 if (vsi->lan_q_ctx[i]) {
2017 ice_free(hw, vsi->lan_q_ctx[i]);
2018 vsi->lan_q_ctx[i] = NULL;
2024 * ice_clear_vsi_ctx - clear the VSI context entry
2025 * @hw: pointer to the HW struct
2026 * @vsi_handle: VSI handle
2028 * clear the VSI context entry
2030 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2032 struct ice_vsi_ctx *vsi;
2034 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2036 ice_clear_vsi_q_ctx(hw, vsi_handle);
2038 hw->vsi_ctx[vsi_handle] = NULL;
2043 * ice_clear_all_vsi_ctx - clear all the VSI context entries
2044 * @hw: pointer to the HW struct
2046 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2050 for (i = 0; i < ICE_MAX_VSI; i++)
2051 ice_clear_vsi_ctx(hw, i);
2055 * ice_add_vsi - add VSI context to the hardware and VSI handle list
2056 * @hw: pointer to the HW struct
2057 * @vsi_handle: unique VSI handle provided by drivers
2058 * @vsi_ctx: pointer to a VSI context struct
2059 * @cd: pointer to command details structure or NULL
2061 * Add a VSI context to the hardware also add it into the VSI handle list.
2062 * If this function gets called after reset for existing VSIs then update
2063 * with the new HW VSI number in the corresponding VSI handle list entry.
2066 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2067 struct ice_sq_cd *cd)
2069 struct ice_vsi_ctx *tmp_vsi_ctx;
2070 enum ice_status status;
2072 if (vsi_handle >= ICE_MAX_VSI)
2073 return ICE_ERR_PARAM;
2074 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2077 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2079 /* Create a new VSI context */
2080 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2081 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2083 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2084 return ICE_ERR_NO_MEMORY;
2086 *tmp_vsi_ctx = *vsi_ctx;
2088 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2090 /* update with new HW VSI num */
2091 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2098 * ice_free_vsi- free VSI context from hardware and VSI handle list
2099 * @hw: pointer to the HW struct
2100 * @vsi_handle: unique VSI handle
2101 * @vsi_ctx: pointer to a VSI context struct
2102 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2103 * @cd: pointer to command details structure or NULL
2105 * Free VSI context info from hardware as well as from VSI handle list
2108 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2109 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2111 enum ice_status status;
2113 if (!ice_is_vsi_valid(hw, vsi_handle))
2114 return ICE_ERR_PARAM;
2115 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2116 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2118 ice_clear_vsi_ctx(hw, vsi_handle);
2124 * @hw: pointer to the HW struct
2125 * @vsi_handle: unique VSI handle
2126 * @vsi_ctx: pointer to a VSI context struct
2127 * @cd: pointer to command details structure or NULL
2129 * Update VSI context in the hardware
2132 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2133 struct ice_sq_cd *cd)
2135 if (!ice_is_vsi_valid(hw, vsi_handle))
2136 return ICE_ERR_PARAM;
2137 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2138 return ice_aq_update_vsi(hw, vsi_ctx, cd);
2142 * ice_aq_get_vsi_params
2143 * @hw: pointer to the HW struct
2144 * @vsi_ctx: pointer to a VSI context struct
2145 * @cd: pointer to command details structure or NULL
2147 * Get VSI context info from hardware (0x0212)
2150 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2151 struct ice_sq_cd *cd)
2153 struct ice_aqc_add_get_update_free_vsi *cmd;
2154 struct ice_aqc_get_vsi_resp *resp;
2155 struct ice_aq_desc desc;
2156 enum ice_status status;
2158 cmd = &desc.params.vsi_cmd;
2159 resp = &desc.params.get_vsi_resp;
2161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2163 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2165 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2166 sizeof(vsi_ctx->info), cd);
2168 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2170 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2171 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2178 * ice_aq_add_update_mir_rule - add/update a mirror rule
2179 * @hw: pointer to the HW struct
2180 * @rule_type: Rule Type
2181 * @dest_vsi: VSI number to which packets will be mirrored
2182 * @count: length of the list
2183 * @mr_buf: buffer for list of mirrored VSI numbers
2184 * @cd: pointer to command details structure or NULL
2187 * Add/Update Mirror Rule (0x260).
2190 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2191 u16 count, struct ice_mir_rule_buf *mr_buf,
2192 struct ice_sq_cd *cd, u16 *rule_id)
2194 struct ice_aqc_add_update_mir_rule *cmd;
2195 struct ice_aq_desc desc;
2196 enum ice_status status;
2197 __le16 *mr_list = NULL;
2200 switch (rule_type) {
2201 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2202 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2203 /* Make sure count and mr_buf are set for these rule_types */
2204 if (!(count && mr_buf))
2205 return ICE_ERR_PARAM;
2207 buf_size = count * sizeof(__le16);
2208 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2210 return ICE_ERR_NO_MEMORY;
2212 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2213 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2214 /* Make sure count and mr_buf are not set for these
2217 if (count || mr_buf)
2218 return ICE_ERR_PARAM;
2221 ice_debug(hw, ICE_DBG_SW,
2222 "Error due to unsupported rule_type %u\n", rule_type);
2223 return ICE_ERR_OUT_OF_RANGE;
2226 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2228 /* Pre-process 'mr_buf' items for add/update of virtual port
2229 * ingress/egress mirroring (but not physical port ingress/egress
2235 for (i = 0; i < count; i++) {
2238 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2240 /* Validate specified VSI number, make sure it is less
2241 * than ICE_MAX_VSI, if not return with error.
2243 if (id >= ICE_MAX_VSI) {
2244 ice_debug(hw, ICE_DBG_SW,
2245 "Error VSI index (%u) out-of-range\n",
2247 ice_free(hw, mr_list);
2248 return ICE_ERR_OUT_OF_RANGE;
2251 /* add VSI to mirror rule */
2254 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2255 else /* remove VSI from mirror rule */
2256 mr_list[i] = CPU_TO_LE16(id);
2260 cmd = &desc.params.add_update_rule;
2261 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2262 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2263 ICE_AQC_RULE_ID_VALID_M);
2264 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2265 cmd->num_entries = CPU_TO_LE16(count);
2266 cmd->dest = CPU_TO_LE16(dest_vsi);
2268 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2270 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2272 ice_free(hw, mr_list);
2278 * ice_aq_delete_mir_rule - delete a mirror rule
2279 * @hw: pointer to the HW struct
2280 * @rule_id: Mirror rule ID (to be deleted)
2281 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2282 * otherwise it is returned to the shared pool
2283 * @cd: pointer to command details structure or NULL
2285 * Delete Mirror Rule (0x261).
2288 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2289 struct ice_sq_cd *cd)
2291 struct ice_aqc_delete_mir_rule *cmd;
2292 struct ice_aq_desc desc;
2294 /* rule_id should be in the range 0...63 */
2295 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2296 return ICE_ERR_OUT_OF_RANGE;
2298 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2300 cmd = &desc.params.del_rule;
2301 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2302 cmd->rule_id = CPU_TO_LE16(rule_id);
2305 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2307 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2311 * ice_aq_alloc_free_vsi_list
2312 * @hw: pointer to the HW struct
2313 * @vsi_list_id: VSI list ID returned or used for lookup
2314 * @lkup_type: switch rule filter lookup type
2315 * @opc: switch rules population command type - pass in the command opcode
2317 * allocates or free a VSI list resource
2319 static enum ice_status
2320 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2321 enum ice_sw_lkup_type lkup_type,
2322 enum ice_adminq_opc opc)
2324 struct ice_aqc_alloc_free_res_elem *sw_buf;
2325 struct ice_aqc_res_elem *vsi_ele;
2326 enum ice_status status;
2329 buf_len = ice_struct_size(sw_buf, elem, 1);
2330 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2332 return ICE_ERR_NO_MEMORY;
2333 sw_buf->num_elems = CPU_TO_LE16(1);
2335 if (lkup_type == ICE_SW_LKUP_MAC ||
2336 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2337 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2338 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2339 lkup_type == ICE_SW_LKUP_PROMISC ||
2340 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2341 lkup_type == ICE_SW_LKUP_LAST) {
2342 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2343 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2345 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2347 status = ICE_ERR_PARAM;
2348 goto ice_aq_alloc_free_vsi_list_exit;
2351 if (opc == ice_aqc_opc_free_res)
2352 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2354 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2356 goto ice_aq_alloc_free_vsi_list_exit;
2358 if (opc == ice_aqc_opc_alloc_res) {
2359 vsi_ele = &sw_buf->elem[0];
2360 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2363 ice_aq_alloc_free_vsi_list_exit:
2364 ice_free(hw, sw_buf);
2369 * ice_aq_set_storm_ctrl - Sets storm control configuration
2370 * @hw: pointer to the HW struct
2371 * @bcast_thresh: represents the upper threshold for broadcast storm control
2372 * @mcast_thresh: represents the upper threshold for multicast storm control
2373 * @ctl_bitmask: storm control control knobs
2375 * Sets the storm control configuration (0x0280)
2378 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2381 struct ice_aqc_storm_cfg *cmd;
2382 struct ice_aq_desc desc;
2384 cmd = &desc.params.storm_conf;
2386 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2388 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2389 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2390 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2392 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2396 * ice_aq_get_storm_ctrl - gets storm control configuration
2397 * @hw: pointer to the HW struct
2398 * @bcast_thresh: represents the upper threshold for broadcast storm control
2399 * @mcast_thresh: represents the upper threshold for multicast storm control
2400 * @ctl_bitmask: storm control control knobs
2402 * Gets the storm control configuration (0x0281)
2405 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2408 enum ice_status status;
2409 struct ice_aq_desc desc;
2411 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2413 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2415 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2418 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2421 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2424 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2431 * ice_aq_sw_rules - add/update/remove switch rules
2432 * @hw: pointer to the HW struct
2433 * @rule_list: pointer to switch rule population list
2434 * @rule_list_sz: total size of the rule list in bytes
2435 * @num_rules: number of switch rules in the rule_list
2436 * @opc: switch rules population command type - pass in the command opcode
2437 * @cd: pointer to command details structure or NULL
2439 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2441 static enum ice_status
2442 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2443 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2445 struct ice_aq_desc desc;
2446 enum ice_status status;
2448 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2450 if (opc != ice_aqc_opc_add_sw_rules &&
2451 opc != ice_aqc_opc_update_sw_rules &&
2452 opc != ice_aqc_opc_remove_sw_rules)
2453 return ICE_ERR_PARAM;
2455 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2457 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2458 desc.params.sw_rules.num_rules_fltr_entry_index =
2459 CPU_TO_LE16(num_rules);
2460 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2461 if (opc != ice_aqc_opc_add_sw_rules &&
2462 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2463 status = ICE_ERR_DOES_NOT_EXIST;
2469 * ice_aq_add_recipe - add switch recipe
2470 * @hw: pointer to the HW struct
2471 * @s_recipe_list: pointer to switch rule population list
2472 * @num_recipes: number of switch recipes in the list
2473 * @cd: pointer to command details structure or NULL
2478 ice_aq_add_recipe(struct ice_hw *hw,
2479 struct ice_aqc_recipe_data_elem *s_recipe_list,
2480 u16 num_recipes, struct ice_sq_cd *cd)
2482 struct ice_aqc_add_get_recipe *cmd;
2483 struct ice_aq_desc desc;
2486 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2487 cmd = &desc.params.add_get_recipe;
2488 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2490 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2491 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2493 buf_size = num_recipes * sizeof(*s_recipe_list);
2495 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2499 * ice_aq_get_recipe - get switch recipe
2500 * @hw: pointer to the HW struct
2501 * @s_recipe_list: pointer to switch rule population list
2502 * @num_recipes: pointer to the number of recipes (input and output)
2503 * @recipe_root: root recipe number of recipe(s) to retrieve
2504 * @cd: pointer to command details structure or NULL
2508 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2509 * On output, *num_recipes will equal the number of entries returned in
2512 * The caller must supply enough space in s_recipe_list to hold all possible
2513 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2516 ice_aq_get_recipe(struct ice_hw *hw,
2517 struct ice_aqc_recipe_data_elem *s_recipe_list,
2518 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2520 struct ice_aqc_add_get_recipe *cmd;
2521 struct ice_aq_desc desc;
2522 enum ice_status status;
2525 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2526 return ICE_ERR_PARAM;
2528 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2529 cmd = &desc.params.add_get_recipe;
2530 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2532 cmd->return_index = CPU_TO_LE16(recipe_root);
2533 cmd->num_sub_recipes = 0;
2535 buf_size = *num_recipes * sizeof(*s_recipe_list);
2537 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2538 /* cppcheck-suppress constArgument */
2539 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2545 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2546 * @hw: pointer to the HW struct
2547 * @profile_id: package profile ID to associate the recipe with
2548 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2549 * @cd: pointer to command details structure or NULL
2550 * Recipe to profile association (0x0291)
2553 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2554 struct ice_sq_cd *cd)
2556 struct ice_aqc_recipe_to_profile *cmd;
2557 struct ice_aq_desc desc;
2559 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2560 cmd = &desc.params.recipe_to_profile;
2561 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2562 cmd->profile_id = CPU_TO_LE16(profile_id);
2563 /* Set the recipe ID bit in the bitmask to let the device know which
2564 * profile we are associating the recipe to
2566 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2567 ICE_NONDMA_TO_NONDMA);
2569 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2573 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2574 * @hw: pointer to the HW struct
2575 * @profile_id: package profile ID to associate the recipe with
2576 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2577 * @cd: pointer to command details structure or NULL
2578 * Associate profile ID with given recipe (0x0293)
2581 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2582 struct ice_sq_cd *cd)
2584 struct ice_aqc_recipe_to_profile *cmd;
2585 struct ice_aq_desc desc;
2586 enum ice_status status;
2588 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2589 cmd = &desc.params.recipe_to_profile;
2590 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2591 cmd->profile_id = CPU_TO_LE16(profile_id);
2593 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2595 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2596 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2602 * ice_alloc_recipe - add recipe resource
2603 * @hw: pointer to the hardware structure
2604 * @rid: recipe ID returned as response to AQ call
2606 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2608 struct ice_aqc_alloc_free_res_elem *sw_buf;
2609 enum ice_status status;
2612 buf_len = ice_struct_size(sw_buf, elem, 1);
2613 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2615 return ICE_ERR_NO_MEMORY;
2617 sw_buf->num_elems = CPU_TO_LE16(1);
2618 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2619 ICE_AQC_RES_TYPE_S) |
2620 ICE_AQC_RES_TYPE_FLAG_SHARED);
2621 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2622 ice_aqc_opc_alloc_res, NULL);
2624 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2625 ice_free(hw, sw_buf);
2630 /* ice_init_port_info - Initialize port_info with switch configuration data
2631 * @pi: pointer to port_info
2632 * @vsi_port_num: VSI number or port number
2633 * @type: Type of switch element (port or VSI)
2634 * @swid: switch ID of the switch the element is attached to
2635 * @pf_vf_num: PF or VF number
2636 * @is_vf: true if the element is a VF, false otherwise
2639 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2640 u16 swid, u16 pf_vf_num, bool is_vf)
2643 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2644 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2646 pi->pf_vf_num = pf_vf_num;
2648 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2649 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2652 ice_debug(pi->hw, ICE_DBG_SW,
2653 "incorrect VSI/port type received\n");
2658 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2659 * @hw: pointer to the hardware structure
2661 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2663 struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2664 enum ice_status status;
2671 num_total_ports = 1;
2673 rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2674 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2677 return ICE_ERR_NO_MEMORY;
2679 /* Multiple calls to ice_aq_get_sw_cfg may be required
2680 * to get all the switch configuration information. The need
2681 * for additional calls is indicated by ice_aq_get_sw_cfg
2682 * writing a non-zero value in req_desc
2685 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2687 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2688 &req_desc, &num_elems, NULL);
2693 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2694 u16 pf_vf_num, swid, vsi_port_num;
2698 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2699 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2701 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2702 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2704 swid = LE16_TO_CPU(ele->swid);
2706 if (LE16_TO_CPU(ele->pf_vf_num) &
2707 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2710 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2711 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2714 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2715 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2716 if (j == num_total_ports) {
2717 ice_debug(hw, ICE_DBG_SW,
2718 "more ports than expected\n");
2719 status = ICE_ERR_CFG;
2722 ice_init_port_info(hw->port_info,
2723 vsi_port_num, res_type, swid,
2731 } while (req_desc && !status);
2734 ice_free(hw, (void *)rbuf);
2739 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2740 * @hw: pointer to the hardware structure
2741 * @fi: filter info structure to fill/update
2743 * This helper function populates the lb_en and lan_en elements of the provided
2744 * ice_fltr_info struct using the switch's type and characteristics of the
2745 * switch rule being configured.
2747 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2749 if ((fi->flag & ICE_FLTR_RX) &&
2750 (fi->fltr_act == ICE_FWD_TO_VSI ||
2751 fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2752 fi->lkup_type == ICE_SW_LKUP_LAST)
2756 if ((fi->flag & ICE_FLTR_TX) &&
2757 (fi->fltr_act == ICE_FWD_TO_VSI ||
2758 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2759 fi->fltr_act == ICE_FWD_TO_Q ||
2760 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2761 /* Setting LB for prune actions will result in replicated
2762 * packets to the internal switch that will be dropped.
2764 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2767 /* Set lan_en to TRUE if
2768 * 1. The switch is a VEB AND
2770 * 2.1 The lookup is a directional lookup like ethertype,
2771 * promiscuous, ethertype-MAC, promiscuous-VLAN
2772 * and default-port OR
2773 * 2.2 The lookup is VLAN, OR
2774 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2775 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2779 * The switch is a VEPA.
2781 * In all other cases, the LAN enable has to be set to false.
2784 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2785 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2786 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2787 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2788 fi->lkup_type == ICE_SW_LKUP_DFLT ||
2789 fi->lkup_type == ICE_SW_LKUP_VLAN ||
2790 (fi->lkup_type == ICE_SW_LKUP_MAC &&
2791 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2792 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2793 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2802 * ice_fill_sw_rule - Helper function to fill switch rule structure
2803 * @hw: pointer to the hardware structure
2804 * @f_info: entry containing packet forwarding information
2805 * @s_rule: switch rule structure to be filled in based on mac_entry
2806 * @opc: switch rules population command type - pass in the command opcode
2809 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2810 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2812 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2820 if (opc == ice_aqc_opc_remove_sw_rules) {
2821 s_rule->pdata.lkup_tx_rx.act = 0;
2822 s_rule->pdata.lkup_tx_rx.index =
2823 CPU_TO_LE16(f_info->fltr_rule_id);
2824 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2828 eth_hdr_sz = sizeof(dummy_eth_header);
2829 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2831 /* initialize the ether header with a dummy header */
2832 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2833 ice_fill_sw_info(hw, f_info);
2835 switch (f_info->fltr_act) {
2836 case ICE_FWD_TO_VSI:
2837 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2838 ICE_SINGLE_ACT_VSI_ID_M;
2839 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2840 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2841 ICE_SINGLE_ACT_VALID_BIT;
2843 case ICE_FWD_TO_VSI_LIST:
2844 act |= ICE_SINGLE_ACT_VSI_LIST;
2845 act |= (f_info->fwd_id.vsi_list_id <<
2846 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2847 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2848 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2849 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2850 ICE_SINGLE_ACT_VALID_BIT;
2853 act |= ICE_SINGLE_ACT_TO_Q;
2854 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2855 ICE_SINGLE_ACT_Q_INDEX_M;
2857 case ICE_DROP_PACKET:
2858 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2859 ICE_SINGLE_ACT_VALID_BIT;
2861 case ICE_FWD_TO_QGRP:
2862 q_rgn = f_info->qgrp_size > 0 ?
2863 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2864 act |= ICE_SINGLE_ACT_TO_Q;
2865 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2866 ICE_SINGLE_ACT_Q_INDEX_M;
2867 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2868 ICE_SINGLE_ACT_Q_REGION_M;
2875 act |= ICE_SINGLE_ACT_LB_ENABLE;
2877 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2879 switch (f_info->lkup_type) {
2880 case ICE_SW_LKUP_MAC:
2881 daddr = f_info->l_data.mac.mac_addr;
2883 case ICE_SW_LKUP_VLAN:
2884 vlan_id = f_info->l_data.vlan.vlan_id;
2885 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2886 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2887 act |= ICE_SINGLE_ACT_PRUNE;
2888 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2891 case ICE_SW_LKUP_ETHERTYPE_MAC:
2892 daddr = f_info->l_data.ethertype_mac.mac_addr;
2894 case ICE_SW_LKUP_ETHERTYPE:
2895 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2896 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2898 case ICE_SW_LKUP_MAC_VLAN:
2899 daddr = f_info->l_data.mac_vlan.mac_addr;
2900 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2902 case ICE_SW_LKUP_PROMISC_VLAN:
2903 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2905 case ICE_SW_LKUP_PROMISC:
2906 daddr = f_info->l_data.mac_vlan.mac_addr;
2912 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2913 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2914 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2916 /* Recipe set depending on lookup type */
2917 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2918 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2919 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2922 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2923 ICE_NONDMA_TO_NONDMA);
2925 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2926 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2927 *off = CPU_TO_BE16(vlan_id);
2930 /* Create the switch rule with the final dummy Ethernet header */
2931 if (opc != ice_aqc_opc_update_sw_rules)
2932 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2936 * ice_add_marker_act
2937 * @hw: pointer to the hardware structure
2938 * @m_ent: the management entry for which sw marker needs to be added
2939 * @sw_marker: sw marker to tag the Rx descriptor with
2940 * @l_id: large action resource ID
2942 * Create a large action to hold software marker and update the switch rule
2943 * entry pointed by m_ent with newly created large action
2945 static enum ice_status
2946 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2947 u16 sw_marker, u16 l_id)
2949 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2950 /* For software marker we need 3 large actions
2951 * 1. FWD action: FWD TO VSI or VSI LIST
2952 * 2. GENERIC VALUE action to hold the profile ID
2953 * 3. GENERIC VALUE action to hold the software marker ID
2955 const u16 num_lg_acts = 3;
2956 enum ice_status status;
2962 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2963 return ICE_ERR_PARAM;
2965 /* Create two back-to-back switch rules and submit them to the HW using
2966 * one memory buffer:
2970 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2971 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2972 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2974 return ICE_ERR_NO_MEMORY;
2976 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2978 /* Fill in the first switch rule i.e. large action */
2979 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2980 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2981 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2983 /* First action VSI forwarding or VSI list forwarding depending on how
2986 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2987 m_ent->fltr_info.fwd_id.hw_vsi_id;
2989 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2990 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2991 ICE_LG_ACT_VSI_LIST_ID_M;
2992 if (m_ent->vsi_count > 1)
2993 act |= ICE_LG_ACT_VSI_LIST;
2994 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2996 /* Second action descriptor type */
2997 act = ICE_LG_ACT_GENERIC;
2999 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3000 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3002 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3003 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3005 /* Third action Marker value */
3006 act |= ICE_LG_ACT_GENERIC;
3007 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3008 ICE_LG_ACT_GENERIC_VALUE_M;
3010 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3012 /* call the fill switch rule to fill the lookup Tx Rx structure */
3013 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3014 ice_aqc_opc_update_sw_rules);
3016 /* Update the action to point to the large action ID */
3017 rx_tx->pdata.lkup_tx_rx.act =
3018 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3019 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3020 ICE_SINGLE_ACT_PTR_VAL_M));
3022 /* Use the filter rule ID of the previously created rule with single
3023 * act. Once the update happens, hardware will treat this as large
3026 rx_tx->pdata.lkup_tx_rx.index =
3027 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3029 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3030 ice_aqc_opc_update_sw_rules, NULL);
3032 m_ent->lg_act_idx = l_id;
3033 m_ent->sw_marker_id = sw_marker;
3036 ice_free(hw, lg_act);
3041 * ice_add_counter_act - add/update filter rule with counter action
3042 * @hw: pointer to the hardware structure
3043 * @m_ent: the management entry for which counter needs to be added
3044 * @counter_id: VLAN counter ID returned as part of allocate resource
3045 * @l_id: large action resource ID
3047 static enum ice_status
3048 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3049 u16 counter_id, u16 l_id)
3051 struct ice_aqc_sw_rules_elem *lg_act;
3052 struct ice_aqc_sw_rules_elem *rx_tx;
3053 enum ice_status status;
3054 /* 2 actions will be added while adding a large action counter */
3055 const int num_acts = 2;
3062 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3063 return ICE_ERR_PARAM;
3065 /* Create two back-to-back switch rules and submit them to the HW using
3066 * one memory buffer:
3070 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3071 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3072 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
3075 return ICE_ERR_NO_MEMORY;
3077 rx_tx = (struct ice_aqc_sw_rules_elem *)
3078 ((u8 *)lg_act + lg_act_size);
3080 /* Fill in the first switch rule i.e. large action */
3081 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3082 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3083 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3085 /* First action VSI forwarding or VSI list forwarding depending on how
3088 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3089 m_ent->fltr_info.fwd_id.hw_vsi_id;
3091 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3092 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3093 ICE_LG_ACT_VSI_LIST_ID_M;
3094 if (m_ent->vsi_count > 1)
3095 act |= ICE_LG_ACT_VSI_LIST;
3096 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3098 /* Second action counter ID */
3099 act = ICE_LG_ACT_STAT_COUNT;
3100 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3101 ICE_LG_ACT_STAT_COUNT_M;
3102 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3104 /* call the fill switch rule to fill the lookup Tx Rx structure */
3105 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3106 ice_aqc_opc_update_sw_rules);
3108 act = ICE_SINGLE_ACT_PTR;
3109 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3110 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3112 /* Use the filter rule ID of the previously created rule with single
3113 * act. Once the update happens, hardware will treat this as large
3116 f_rule_id = m_ent->fltr_info.fltr_rule_id;
3117 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3119 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3120 ice_aqc_opc_update_sw_rules, NULL);
3122 m_ent->lg_act_idx = l_id;
3123 m_ent->counter_index = counter_id;
3126 ice_free(hw, lg_act);
3131 * ice_create_vsi_list_map
3132 * @hw: pointer to the hardware structure
3133 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3134 * @num_vsi: number of VSI handles in the array
3135 * @vsi_list_id: VSI list ID generated as part of allocate resource
3137 * Helper function to create a new entry of VSI list ID to VSI mapping
3138 * using the given VSI list ID
3140 static struct ice_vsi_list_map_info *
3141 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3144 struct ice_switch_info *sw = hw->switch_info;
3145 struct ice_vsi_list_map_info *v_map;
3148 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3153 v_map->vsi_list_id = vsi_list_id;
3155 for (i = 0; i < num_vsi; i++)
3156 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3158 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3163 * ice_update_vsi_list_rule
3164 * @hw: pointer to the hardware structure
3165 * @vsi_handle_arr: array of VSI handles to form a VSI list
3166 * @num_vsi: number of VSI handles in the array
3167 * @vsi_list_id: VSI list ID generated as part of allocate resource
3168 * @remove: Boolean value to indicate if this is a remove action
3169 * @opc: switch rules population command type - pass in the command opcode
3170 * @lkup_type: lookup type of the filter
3172 * Call AQ command to add a new switch rule or update existing switch rule
3173 * using the given VSI list ID
3175 static enum ice_status
3176 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3177 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3178 enum ice_sw_lkup_type lkup_type)
3180 struct ice_aqc_sw_rules_elem *s_rule;
3181 enum ice_status status;
3187 return ICE_ERR_PARAM;
3189 if (lkup_type == ICE_SW_LKUP_MAC ||
3190 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3191 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3192 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3193 lkup_type == ICE_SW_LKUP_PROMISC ||
3194 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3195 lkup_type == ICE_SW_LKUP_LAST)
3196 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3197 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3198 else if (lkup_type == ICE_SW_LKUP_VLAN)
3199 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3200 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3202 return ICE_ERR_PARAM;
3204 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3205 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3207 return ICE_ERR_NO_MEMORY;
3208 for (i = 0; i < num_vsi; i++) {
3209 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3210 status = ICE_ERR_PARAM;
3213 /* AQ call requires hw_vsi_id(s) */
3214 s_rule->pdata.vsi_list.vsi[i] =
3215 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3218 s_rule->type = CPU_TO_LE16(rule_type);
3219 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3220 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3222 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3225 ice_free(hw, s_rule);
3230 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3231 * @hw: pointer to the HW struct
3232 * @vsi_handle_arr: array of VSI handles to form a VSI list
3233 * @num_vsi: number of VSI handles in the array
3234 * @vsi_list_id: stores the ID of the VSI list to be created
3235 * @lkup_type: switch rule filter's lookup type
3237 static enum ice_status
3238 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3239 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3241 enum ice_status status;
3243 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3244 ice_aqc_opc_alloc_res);
3248 /* Update the newly created VSI list to include the specified VSIs */
3249 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3250 *vsi_list_id, false,
3251 ice_aqc_opc_add_sw_rules, lkup_type);
3255 * ice_create_pkt_fwd_rule
3256 * @hw: pointer to the hardware structure
3257 * @recp_list: corresponding filter management list
3258 * @f_entry: entry containing packet forwarding information
3260 * Create switch rule with given filter information and add an entry
3261 * to the corresponding filter management list to track this switch rule
3264 static enum ice_status
3265 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3266 struct ice_fltr_list_entry *f_entry)
3268 struct ice_fltr_mgmt_list_entry *fm_entry;
3269 struct ice_aqc_sw_rules_elem *s_rule;
3270 enum ice_status status;
3272 s_rule = (struct ice_aqc_sw_rules_elem *)
3273 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3275 return ICE_ERR_NO_MEMORY;
3276 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3277 ice_malloc(hw, sizeof(*fm_entry));
3279 status = ICE_ERR_NO_MEMORY;
3280 goto ice_create_pkt_fwd_rule_exit;
3283 fm_entry->fltr_info = f_entry->fltr_info;
3285 /* Initialize all the fields for the management entry */
3286 fm_entry->vsi_count = 1;
3287 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3288 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3289 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3291 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3292 ice_aqc_opc_add_sw_rules);
3294 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3295 ice_aqc_opc_add_sw_rules, NULL);
3297 ice_free(hw, fm_entry);
3298 goto ice_create_pkt_fwd_rule_exit;
3301 f_entry->fltr_info.fltr_rule_id =
3302 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3303 fm_entry->fltr_info.fltr_rule_id =
3304 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3306 /* The book keeping entries will get removed when base driver
3307 * calls remove filter AQ command
3309 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3311 ice_create_pkt_fwd_rule_exit:
3312 ice_free(hw, s_rule);
3317 * ice_update_pkt_fwd_rule
3318 * @hw: pointer to the hardware structure
3319 * @f_info: filter information for switch rule
3321 * Call AQ command to update a previously created switch rule with a
3324 static enum ice_status
3325 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3327 struct ice_aqc_sw_rules_elem *s_rule;
3328 enum ice_status status;
3330 s_rule = (struct ice_aqc_sw_rules_elem *)
3331 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3333 return ICE_ERR_NO_MEMORY;
3335 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3337 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3339 /* Update switch rule with new rule set to forward VSI list */
3340 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3341 ice_aqc_opc_update_sw_rules, NULL);
3343 ice_free(hw, s_rule);
3348 * ice_update_sw_rule_bridge_mode
3349 * @hw: pointer to the HW struct
3351 * Updates unicast switch filter rules based on VEB/VEPA mode
3353 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3355 struct ice_switch_info *sw = hw->switch_info;
3356 struct ice_fltr_mgmt_list_entry *fm_entry;
3357 enum ice_status status = ICE_SUCCESS;
3358 struct LIST_HEAD_TYPE *rule_head;
3359 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3361 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3362 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3364 ice_acquire_lock(rule_lock);
3365 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3367 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3368 u8 *addr = fi->l_data.mac.mac_addr;
3370 /* Update unicast Tx rules to reflect the selected
3373 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3374 (fi->fltr_act == ICE_FWD_TO_VSI ||
3375 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3376 fi->fltr_act == ICE_FWD_TO_Q ||
3377 fi->fltr_act == ICE_FWD_TO_QGRP)) {
3378 status = ice_update_pkt_fwd_rule(hw, fi);
3384 ice_release_lock(rule_lock);
3390 * ice_add_update_vsi_list
3391 * @hw: pointer to the hardware structure
3392 * @m_entry: pointer to current filter management list entry
3393 * @cur_fltr: filter information from the book keeping entry
3394 * @new_fltr: filter information with the new VSI to be added
3396 * Call AQ command to add or update previously created VSI list with new VSI.
3398 * Helper function to do book keeping associated with adding filter information
3399 * The algorithm to do the book keeping is described below :
3400 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3401 * if only one VSI has been added till now
3402 * Allocate a new VSI list and add two VSIs
3403 * to this list using switch rule command
3404 * Update the previously created switch rule with the
3405 * newly created VSI list ID
3406 * if a VSI list was previously created
3407 * Add the new VSI to the previously created VSI list set
3408 * using the update switch rule command
3410 static enum ice_status
3411 ice_add_update_vsi_list(struct ice_hw *hw,
3412 struct ice_fltr_mgmt_list_entry *m_entry,
3413 struct ice_fltr_info *cur_fltr,
3414 struct ice_fltr_info *new_fltr)
3416 enum ice_status status = ICE_SUCCESS;
3417 u16 vsi_list_id = 0;
3419 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3420 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3421 return ICE_ERR_NOT_IMPL;
3423 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3424 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3425 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3426 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3427 return ICE_ERR_NOT_IMPL;
3429 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3430 /* Only one entry existed in the mapping and it was not already
3431 * a part of a VSI list. So, create a VSI list with the old and
3434 struct ice_fltr_info tmp_fltr;
3435 u16 vsi_handle_arr[2];
3437 /* A rule already exists with the new VSI being added */
3438 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3439 return ICE_ERR_ALREADY_EXISTS;
3441 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3442 vsi_handle_arr[1] = new_fltr->vsi_handle;
3443 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3445 new_fltr->lkup_type);
3449 tmp_fltr = *new_fltr;
3450 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3451 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3452 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3453 /* Update the previous switch rule of "MAC forward to VSI" to
3454 * "MAC fwd to VSI list"
3456 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3460 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3461 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3462 m_entry->vsi_list_info =
3463 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3466 /* If this entry was large action then the large action needs
3467 * to be updated to point to FWD to VSI list
3469 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3471 ice_add_marker_act(hw, m_entry,
3472 m_entry->sw_marker_id,
3473 m_entry->lg_act_idx);
3475 u16 vsi_handle = new_fltr->vsi_handle;
3476 enum ice_adminq_opc opcode;
3478 if (!m_entry->vsi_list_info)
3481 /* A rule already exists with the new VSI being added */
3482 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3485 /* Update the previously created VSI list set with
3486 * the new VSI ID passed in
3488 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3489 opcode = ice_aqc_opc_update_sw_rules;
3491 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3492 vsi_list_id, false, opcode,
3493 new_fltr->lkup_type);
3494 /* update VSI list mapping info with new VSI ID */
3496 ice_set_bit(vsi_handle,
3497 m_entry->vsi_list_info->vsi_map);
3500 m_entry->vsi_count++;
3505 * ice_find_rule_entry - Search a rule entry
3506 * @list_head: head of rule list
3507 * @f_info: rule information
3509 * Helper function to search for a given rule entry
3510 * Returns pointer to entry storing the rule if found
3512 static struct ice_fltr_mgmt_list_entry *
3513 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3514 struct ice_fltr_info *f_info)
3516 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3518 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3520 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3521 sizeof(f_info->l_data)) &&
3522 f_info->flag == list_itr->fltr_info.flag) {
3531 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3532 * @recp_list: VSI lists needs to be searched
3533 * @vsi_handle: VSI handle to be found in VSI list
3534 * @vsi_list_id: VSI list ID found containing vsi_handle
3536 * Helper function to search a VSI list with single entry containing given VSI
3537 * handle element. This can be extended further to search VSI list with more
3538 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3540 static struct ice_vsi_list_map_info *
3541 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3544 struct ice_vsi_list_map_info *map_info = NULL;
3545 struct LIST_HEAD_TYPE *list_head;
3547 list_head = &recp_list->filt_rules;
3548 if (recp_list->adv_rule) {
3549 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3551 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3552 ice_adv_fltr_mgmt_list_entry,
3554 if (list_itr->vsi_list_info) {
3555 map_info = list_itr->vsi_list_info;
3556 if (ice_is_bit_set(map_info->vsi_map,
3558 *vsi_list_id = map_info->vsi_list_id;
3564 struct ice_fltr_mgmt_list_entry *list_itr;
3566 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3567 ice_fltr_mgmt_list_entry,
3569 if (list_itr->vsi_count == 1 &&
3570 list_itr->vsi_list_info) {
3571 map_info = list_itr->vsi_list_info;
3572 if (ice_is_bit_set(map_info->vsi_map,
3574 *vsi_list_id = map_info->vsi_list_id;
3584 * ice_add_rule_internal - add rule for a given lookup type
3585 * @hw: pointer to the hardware structure
3586 * @recp_list: recipe list for which rule has to be added
3587 * @lport: logic port number on which function add rule
3588 * @f_entry: structure containing MAC forwarding information
3590 * Adds or updates the rule lists for a given recipe
3592 static enum ice_status
3593 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3594 u8 lport, struct ice_fltr_list_entry *f_entry)
3596 struct ice_fltr_info *new_fltr, *cur_fltr;
3597 struct ice_fltr_mgmt_list_entry *m_entry;
3598 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3599 enum ice_status status = ICE_SUCCESS;
3601 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3602 return ICE_ERR_PARAM;
3604 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3605 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3606 f_entry->fltr_info.fwd_id.hw_vsi_id =
3607 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3609 rule_lock = &recp_list->filt_rule_lock;
3611 ice_acquire_lock(rule_lock);
3612 new_fltr = &f_entry->fltr_info;
3613 if (new_fltr->flag & ICE_FLTR_RX)
3614 new_fltr->src = lport;
3615 else if (new_fltr->flag & ICE_FLTR_TX)
3617 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3619 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3621 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3622 goto exit_add_rule_internal;
3625 cur_fltr = &m_entry->fltr_info;
3626 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3628 exit_add_rule_internal:
3629 ice_release_lock(rule_lock);
3634 * ice_remove_vsi_list_rule
3635 * @hw: pointer to the hardware structure
3636 * @vsi_list_id: VSI list ID generated as part of allocate resource
3637 * @lkup_type: switch rule filter lookup type
3639 * The VSI list should be emptied before this function is called to remove the
3642 static enum ice_status
3643 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3644 enum ice_sw_lkup_type lkup_type)
3646 /* Free the vsi_list resource that we allocated. It is assumed that the
3647 * list is empty at this point.
3649 return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3650 ice_aqc_opc_free_res);
3654 * ice_rem_update_vsi_list
3655 * @hw: pointer to the hardware structure
3656 * @vsi_handle: VSI handle of the VSI to remove
3657 * @fm_list: filter management entry for which the VSI list management needs to
3660 static enum ice_status
3661 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3662 struct ice_fltr_mgmt_list_entry *fm_list)
3664 enum ice_sw_lkup_type lkup_type;
3665 enum ice_status status = ICE_SUCCESS;
3668 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3669 fm_list->vsi_count == 0)
3670 return ICE_ERR_PARAM;
3672 /* A rule with the VSI being removed does not exist */
3673 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3674 return ICE_ERR_DOES_NOT_EXIST;
3676 lkup_type = fm_list->fltr_info.lkup_type;
3677 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3678 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3679 ice_aqc_opc_update_sw_rules,
3684 fm_list->vsi_count--;
3685 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3687 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3688 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3689 struct ice_vsi_list_map_info *vsi_list_info =
3690 fm_list->vsi_list_info;
3693 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3695 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3696 return ICE_ERR_OUT_OF_RANGE;
3698 /* Make sure VSI list is empty before removing it below */
3699 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3701 ice_aqc_opc_update_sw_rules,
3706 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3707 tmp_fltr_info.fwd_id.hw_vsi_id =
3708 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3709 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3710 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3712 ice_debug(hw, ICE_DBG_SW,
3713 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3714 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3718 fm_list->fltr_info = tmp_fltr_info;
3721 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3722 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3723 struct ice_vsi_list_map_info *vsi_list_info =
3724 fm_list->vsi_list_info;
3726 /* Remove the VSI list since it is no longer used */
3727 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3729 ice_debug(hw, ICE_DBG_SW,
3730 "Failed to remove VSI list %d, error %d\n",
3731 vsi_list_id, status);
3735 LIST_DEL(&vsi_list_info->list_entry);
3736 ice_free(hw, vsi_list_info);
3737 fm_list->vsi_list_info = NULL;
3744 * ice_remove_rule_internal - Remove a filter rule of a given type
3746 * @hw: pointer to the hardware structure
3747 * @recp_list: recipe list for which the rule needs to removed
3748 * @f_entry: rule entry containing filter information
3750 static enum ice_status
3751 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3752 struct ice_fltr_list_entry *f_entry)
3754 struct ice_fltr_mgmt_list_entry *list_elem;
3755 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3756 enum ice_status status = ICE_SUCCESS;
3757 bool remove_rule = false;
3760 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3761 return ICE_ERR_PARAM;
3762 f_entry->fltr_info.fwd_id.hw_vsi_id =
3763 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3765 rule_lock = &recp_list->filt_rule_lock;
3766 ice_acquire_lock(rule_lock);
3767 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3768 &f_entry->fltr_info);
3770 status = ICE_ERR_DOES_NOT_EXIST;
3774 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3776 } else if (!list_elem->vsi_list_info) {
3777 status = ICE_ERR_DOES_NOT_EXIST;
3779 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3780 /* a ref_cnt > 1 indicates that the vsi_list is being
3781 * shared by multiple rules. Decrement the ref_cnt and
3782 * remove this rule, but do not modify the list, as it
3783 * is in-use by other rules.
3785 list_elem->vsi_list_info->ref_cnt--;
3788 /* a ref_cnt of 1 indicates the vsi_list is only used
3789 * by one rule. However, the original removal request is only
3790 * for a single VSI. Update the vsi_list first, and only
3791 * remove the rule if there are no further VSIs in this list.
3793 vsi_handle = f_entry->fltr_info.vsi_handle;
3794 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3797 /* if VSI count goes to zero after updating the VSI list */
3798 if (list_elem->vsi_count == 0)
3803 /* Remove the lookup rule */
3804 struct ice_aqc_sw_rules_elem *s_rule;
3806 s_rule = (struct ice_aqc_sw_rules_elem *)
3807 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3809 status = ICE_ERR_NO_MEMORY;
3813 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3814 ice_aqc_opc_remove_sw_rules);
3816 status = ice_aq_sw_rules(hw, s_rule,
3817 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3818 ice_aqc_opc_remove_sw_rules, NULL);
3820 /* Remove a book keeping from the list */
3821 ice_free(hw, s_rule);
3826 LIST_DEL(&list_elem->list_entry);
3827 ice_free(hw, list_elem);
3830 ice_release_lock(rule_lock);
3835 * ice_aq_get_res_alloc - get allocated resources
3836 * @hw: pointer to the HW struct
3837 * @num_entries: pointer to u16 to store the number of resource entries returned
3838 * @buf: pointer to buffer
3839 * @buf_size: size of buf
3840 * @cd: pointer to command details structure or NULL
3842 * The caller-supplied buffer must be large enough to store the resource
3843 * information for all resource types. Each resource type is an
3844 * ice_aqc_get_res_resp_elem structure.
3847 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3848 struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3849 struct ice_sq_cd *cd)
3851 struct ice_aqc_get_res_alloc *resp;
3852 enum ice_status status;
3853 struct ice_aq_desc desc;
3856 return ICE_ERR_BAD_PTR;
3858 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3859 return ICE_ERR_INVAL_SIZE;
3861 resp = &desc.params.get_res;
3863 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3864 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3866 if (!status && num_entries)
3867 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3873 * ice_aq_get_res_descs - get allocated resource descriptors
3874 * @hw: pointer to the hardware structure
3875 * @num_entries: number of resource entries in buffer
3876 * @buf: structure to hold response data buffer
3877 * @buf_size: size of buffer
3878 * @res_type: resource type
3879 * @res_shared: is resource shared
3880 * @desc_id: input - first desc ID to start; output - next desc ID
3881 * @cd: pointer to command details structure or NULL
3884 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3885 struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3886 bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3888 struct ice_aqc_get_allocd_res_desc *cmd;
3889 struct ice_aq_desc desc;
3890 enum ice_status status;
3892 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3894 cmd = &desc.params.get_res_desc;
3897 return ICE_ERR_PARAM;
3899 if (buf_size != (num_entries * sizeof(*buf)))
3900 return ICE_ERR_PARAM;
3902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3904 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3905 ICE_AQC_RES_TYPE_M) | (res_shared ?
3906 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3907 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3909 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3911 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3917 * ice_add_mac_rule - Add a MAC address based filter rule
3918 * @hw: pointer to the hardware structure
3919 * @m_list: list of MAC addresses and forwarding information
3920 * @sw: pointer to switch info struct for which function add rule
3921 * @lport: logic port number on which function add rule
3923 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3924 * multiple unicast addresses, the function assumes that all the
3925 * addresses are unique in a given add_mac call. It doesn't
3926 * check for duplicates in this case, removing duplicates from a given
3927 * list should be taken care of in the caller of this function.
3929 static enum ice_status
3930 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3931 struct ice_switch_info *sw, u8 lport)
3933 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3934 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3935 struct ice_fltr_list_entry *m_list_itr;
3936 struct LIST_HEAD_TYPE *rule_head;
3937 u16 total_elem_left, s_rule_size;
3938 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3939 enum ice_status status = ICE_SUCCESS;
3940 u16 num_unicast = 0;
3944 rule_lock = &recp_list->filt_rule_lock;
3945 rule_head = &recp_list->filt_rules;
3947 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3949 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3953 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3954 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3955 if (!ice_is_vsi_valid(hw, vsi_handle))
3956 return ICE_ERR_PARAM;
3957 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3958 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3959 /* update the src in case it is VSI num */
3960 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3961 return ICE_ERR_PARAM;
3962 m_list_itr->fltr_info.src = hw_vsi_id;
3963 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3964 IS_ZERO_ETHER_ADDR(add))
3965 return ICE_ERR_PARAM;
3966 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3967 /* Don't overwrite the unicast address */
3968 ice_acquire_lock(rule_lock);
3969 if (ice_find_rule_entry(rule_head,
3970 &m_list_itr->fltr_info)) {
3971 ice_release_lock(rule_lock);
3972 return ICE_ERR_ALREADY_EXISTS;
3974 ice_release_lock(rule_lock);
3976 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3977 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3978 m_list_itr->status =
3979 ice_add_rule_internal(hw, recp_list, lport,
3981 if (m_list_itr->status)
3982 return m_list_itr->status;
3986 ice_acquire_lock(rule_lock);
3987 /* Exit if no suitable entries were found for adding bulk switch rule */
3989 status = ICE_SUCCESS;
3990 goto ice_add_mac_exit;
3993 /* Allocate switch rule buffer for the bulk update for unicast */
3994 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3995 s_rule = (struct ice_aqc_sw_rules_elem *)
3996 ice_calloc(hw, num_unicast, s_rule_size);
3998 status = ICE_ERR_NO_MEMORY;
3999 goto ice_add_mac_exit;
4003 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4005 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4006 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4008 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4009 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4010 ice_aqc_opc_add_sw_rules);
4011 r_iter = (struct ice_aqc_sw_rules_elem *)
4012 ((u8 *)r_iter + s_rule_size);
4016 /* Call AQ bulk switch rule update for all unicast addresses */
4018 /* Call AQ switch rule in AQ_MAX chunk */
4019 for (total_elem_left = num_unicast; total_elem_left > 0;
4020 total_elem_left -= elem_sent) {
4021 struct ice_aqc_sw_rules_elem *entry = r_iter;
4023 elem_sent = MIN_T(u8, total_elem_left,
4024 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4025 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4026 elem_sent, ice_aqc_opc_add_sw_rules,
4029 goto ice_add_mac_exit;
4030 r_iter = (struct ice_aqc_sw_rules_elem *)
4031 ((u8 *)r_iter + (elem_sent * s_rule_size));
4034 /* Fill up rule ID based on the value returned from FW */
4036 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4038 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4039 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4040 struct ice_fltr_mgmt_list_entry *fm_entry;
4042 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4043 f_info->fltr_rule_id =
4044 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4045 f_info->fltr_act = ICE_FWD_TO_VSI;
4046 /* Create an entry to track this MAC address */
4047 fm_entry = (struct ice_fltr_mgmt_list_entry *)
4048 ice_malloc(hw, sizeof(*fm_entry));
4050 status = ICE_ERR_NO_MEMORY;
4051 goto ice_add_mac_exit;
4053 fm_entry->fltr_info = *f_info;
4054 fm_entry->vsi_count = 1;
4055 /* The book keeping entries will get removed when
4056 * base driver calls remove filter AQ command
4059 LIST_ADD(&fm_entry->list_entry, rule_head);
4060 r_iter = (struct ice_aqc_sw_rules_elem *)
4061 ((u8 *)r_iter + s_rule_size);
4066 ice_release_lock(rule_lock);
4068 ice_free(hw, s_rule);
4073 * ice_add_mac - Add a MAC address based filter rule
4074 * @hw: pointer to the hardware structure
4075 * @m_list: list of MAC addresses and forwarding information
4077 * Function add MAC rule for logical port from HW struct
4079 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4082 return ICE_ERR_PARAM;
4084 return ice_add_mac_rule(hw, m_list, hw->switch_info,
4085 hw->port_info->lport);
4089 * ice_add_vlan_internal - Add one VLAN based filter rule
4090 * @hw: pointer to the hardware structure
4091 * @recp_list: recipe list for which rule has to be added
4092 * @f_entry: filter entry containing one VLAN information
4094 static enum ice_status
4095 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4096 struct ice_fltr_list_entry *f_entry)
4098 struct ice_fltr_mgmt_list_entry *v_list_itr;
4099 struct ice_fltr_info *new_fltr, *cur_fltr;
4100 enum ice_sw_lkup_type lkup_type;
4101 u16 vsi_list_id = 0, vsi_handle;
4102 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4103 enum ice_status status = ICE_SUCCESS;
4105 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4106 return ICE_ERR_PARAM;
4108 f_entry->fltr_info.fwd_id.hw_vsi_id =
4109 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4110 new_fltr = &f_entry->fltr_info;
4112 /* VLAN ID should only be 12 bits */
4113 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4114 return ICE_ERR_PARAM;
4116 if (new_fltr->src_id != ICE_SRC_ID_VSI)
4117 return ICE_ERR_PARAM;
4119 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4120 lkup_type = new_fltr->lkup_type;
4121 vsi_handle = new_fltr->vsi_handle;
4122 rule_lock = &recp_list->filt_rule_lock;
4123 ice_acquire_lock(rule_lock);
4124 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4126 struct ice_vsi_list_map_info *map_info = NULL;
4128 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4129 /* All VLAN pruning rules use a VSI list. Check if
4130 * there is already a VSI list containing VSI that we
4131 * want to add. If found, use the same vsi_list_id for
4132 * this new VLAN rule or else create a new list.
4134 map_info = ice_find_vsi_list_entry(recp_list,
4138 status = ice_create_vsi_list_rule(hw,
4146 /* Convert the action to forwarding to a VSI list. */
4147 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4148 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4151 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4153 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4156 status = ICE_ERR_DOES_NOT_EXIST;
4159 /* reuse VSI list for new rule and increment ref_cnt */
4161 v_list_itr->vsi_list_info = map_info;
4162 map_info->ref_cnt++;
4164 v_list_itr->vsi_list_info =
4165 ice_create_vsi_list_map(hw, &vsi_handle,
4169 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4170 /* Update existing VSI list to add new VSI ID only if it used
4173 cur_fltr = &v_list_itr->fltr_info;
4174 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4177 /* If VLAN rule exists and VSI list being used by this rule is
4178 * referenced by more than 1 VLAN rule. Then create a new VSI
4179 * list appending previous VSI with new VSI and update existing
4180 * VLAN rule to point to new VSI list ID
4182 struct ice_fltr_info tmp_fltr;
4183 u16 vsi_handle_arr[2];
4186 /* Current implementation only supports reusing VSI list with
4187 * one VSI count. We should never hit below condition
4189 if (v_list_itr->vsi_count > 1 &&
4190 v_list_itr->vsi_list_info->ref_cnt > 1) {
4191 ice_debug(hw, ICE_DBG_SW,
4192 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4193 status = ICE_ERR_CFG;
4198 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4201 /* A rule already exists with the new VSI being added */
4202 if (cur_handle == vsi_handle) {
4203 status = ICE_ERR_ALREADY_EXISTS;
4207 vsi_handle_arr[0] = cur_handle;
4208 vsi_handle_arr[1] = vsi_handle;
4209 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4210 &vsi_list_id, lkup_type);
4214 tmp_fltr = v_list_itr->fltr_info;
4215 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4216 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4217 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4218 /* Update the previous switch rule to a new VSI list which
4219 * includes current VSI that is requested
4221 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4225 /* before overriding VSI list map info. decrement ref_cnt of
4228 v_list_itr->vsi_list_info->ref_cnt--;
4230 /* now update to newly created list */
4231 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4232 v_list_itr->vsi_list_info =
4233 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4235 v_list_itr->vsi_count++;
4239 ice_release_lock(rule_lock);
4244 * ice_add_vlan_rule - Add VLAN based filter rule
4245 * @hw: pointer to the hardware structure
4246 * @v_list: list of VLAN entries and forwarding information
4247 * @sw: pointer to switch info struct for which function add rule
4249 static enum ice_status
4250 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4251 struct ice_switch_info *sw)
4253 struct ice_fltr_list_entry *v_list_itr;
4254 struct ice_sw_recipe *recp_list;
4256 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4257 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4259 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4260 return ICE_ERR_PARAM;
4261 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4262 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4264 if (v_list_itr->status)
4265 return v_list_itr->status;
4271 * ice_add_vlan - Add a VLAN based filter rule
4272 * @hw: pointer to the hardware structure
4273 * @v_list: list of VLAN and forwarding information
4275 * Function add VLAN rule for logical port from HW struct
4277 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4280 return ICE_ERR_PARAM;
4282 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4286 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4287 * @hw: pointer to the hardware structure
4288 * @mv_list: list of MAC and VLAN filters
4289 * @sw: pointer to switch info struct for which function add rule
4290 * @lport: logic port number on which function add rule
4292 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4293 * pruning bits enabled, then it is the responsibility of the caller to make
4294 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4295 * VLAN won't be received on that VSI otherwise.
4297 static enum ice_status
4298 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4299 struct ice_switch_info *sw, u8 lport)
4301 struct ice_fltr_list_entry *mv_list_itr;
4302 struct ice_sw_recipe *recp_list;
4304 if (!mv_list || !hw)
4305 return ICE_ERR_PARAM;
4307 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4308 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4310 enum ice_sw_lkup_type l_type =
4311 mv_list_itr->fltr_info.lkup_type;
4313 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4314 return ICE_ERR_PARAM;
4315 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4316 mv_list_itr->status =
4317 ice_add_rule_internal(hw, recp_list, lport,
4319 if (mv_list_itr->status)
4320 return mv_list_itr->status;
4326 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4327 * @hw: pointer to the hardware structure
4328 * @mv_list: list of MAC VLAN addresses and forwarding information
4330 * Function add MAC VLAN rule for logical port from HW struct
4333 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4335 if (!mv_list || !hw)
4336 return ICE_ERR_PARAM;
4338 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4339 hw->port_info->lport);
4343 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4344 * @hw: pointer to the hardware structure
4345 * @em_list: list of ether type MAC filter, MAC is optional
4346 * @sw: pointer to switch info struct for which function add rule
4347 * @lport: logic port number on which function add rule
4349 * This function requires the caller to populate the entries in
4350 * the filter list with the necessary fields (including flags to
4351 * indicate Tx or Rx rules).
4353 static enum ice_status
4354 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4355 struct ice_switch_info *sw, u8 lport)
4357 struct ice_fltr_list_entry *em_list_itr;
4359 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4361 struct ice_sw_recipe *recp_list;
4362 enum ice_sw_lkup_type l_type;
4364 l_type = em_list_itr->fltr_info.lkup_type;
4365 recp_list = &sw->recp_list[l_type];
4367 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4368 l_type != ICE_SW_LKUP_ETHERTYPE)
4369 return ICE_ERR_PARAM;
4371 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4374 if (em_list_itr->status)
4375 return em_list_itr->status;
4381 * ice_add_eth_mac - Add a ethertype based filter rule
4382 * @hw: pointer to the hardware structure
4383 * @em_list: list of ethertype and forwarding information
4385 * Function add ethertype rule for logical port from HW struct
4388 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4390 if (!em_list || !hw)
4391 return ICE_ERR_PARAM;
4393 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4394 hw->port_info->lport);
4398 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4399 * @hw: pointer to the hardware structure
4400 * @em_list: list of ethertype or ethertype MAC entries
4401 * @sw: pointer to switch info struct for which function add rule
4403 static enum ice_status
4404 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4405 struct ice_switch_info *sw)
4407 struct ice_fltr_list_entry *em_list_itr, *tmp;
4409 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4411 struct ice_sw_recipe *recp_list;
4412 enum ice_sw_lkup_type l_type;
4414 l_type = em_list_itr->fltr_info.lkup_type;
4416 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4417 l_type != ICE_SW_LKUP_ETHERTYPE)
4418 return ICE_ERR_PARAM;
4420 recp_list = &sw->recp_list[l_type];
4421 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4423 if (em_list_itr->status)
4424 return em_list_itr->status;
4430 * ice_remove_eth_mac - remove a ethertype based filter rule
4431 * @hw: pointer to the hardware structure
4432 * @em_list: list of ethertype and forwarding information
4436 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4438 if (!em_list || !hw)
4439 return ICE_ERR_PARAM;
4441 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4445 * ice_rem_sw_rule_info
4446 * @hw: pointer to the hardware structure
4447 * @rule_head: pointer to the switch list structure that we want to delete
4450 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4452 if (!LIST_EMPTY(rule_head)) {
4453 struct ice_fltr_mgmt_list_entry *entry;
4454 struct ice_fltr_mgmt_list_entry *tmp;
4456 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4457 ice_fltr_mgmt_list_entry, list_entry) {
4458 LIST_DEL(&entry->list_entry);
4459 ice_free(hw, entry);
4465 * ice_rem_adv_rule_info
4466 * @hw: pointer to the hardware structure
4467 * @rule_head: pointer to the switch list structure that we want to delete
4470 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4472 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4473 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4475 if (LIST_EMPTY(rule_head))
4478 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4479 ice_adv_fltr_mgmt_list_entry, list_entry) {
4480 LIST_DEL(&lst_itr->list_entry);
4481 ice_free(hw, lst_itr->lkups);
4482 ice_free(hw, lst_itr);
4487 * ice_rem_all_sw_rules_info
4488 * @hw: pointer to the hardware structure
4490 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4492 struct ice_switch_info *sw = hw->switch_info;
4495 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4496 struct LIST_HEAD_TYPE *rule_head;
4498 rule_head = &sw->recp_list[i].filt_rules;
4499 if (!sw->recp_list[i].adv_rule)
4500 ice_rem_sw_rule_info(hw, rule_head);
4502 ice_rem_adv_rule_info(hw, rule_head);
4503 if (sw->recp_list[i].adv_rule &&
4504 LIST_EMPTY(&sw->recp_list[i].filt_rules))
4505 sw->recp_list[i].adv_rule = false;
4510 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4511 * @pi: pointer to the port_info structure
4512 * @vsi_handle: VSI handle to set as default
4513 * @set: true to add the above mentioned switch rule, false to remove it
4514 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4516 * add filter rule to set/unset given VSI as default VSI for the switch
4517 * (represented by swid)
4520 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4523 struct ice_aqc_sw_rules_elem *s_rule;
4524 struct ice_fltr_info f_info;
4525 struct ice_hw *hw = pi->hw;
4526 enum ice_adminq_opc opcode;
4527 enum ice_status status;
4531 if (!ice_is_vsi_valid(hw, vsi_handle))
4532 return ICE_ERR_PARAM;
4533 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4535 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4536 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4537 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4539 return ICE_ERR_NO_MEMORY;
4541 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4543 f_info.lkup_type = ICE_SW_LKUP_DFLT;
4544 f_info.flag = direction;
4545 f_info.fltr_act = ICE_FWD_TO_VSI;
4546 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4548 if (f_info.flag & ICE_FLTR_RX) {
4549 f_info.src = pi->lport;
4550 f_info.src_id = ICE_SRC_ID_LPORT;
4552 f_info.fltr_rule_id =
4553 pi->dflt_rx_vsi_rule_id;
4554 } else if (f_info.flag & ICE_FLTR_TX) {
4555 f_info.src_id = ICE_SRC_ID_VSI;
4556 f_info.src = hw_vsi_id;
4558 f_info.fltr_rule_id =
4559 pi->dflt_tx_vsi_rule_id;
4563 opcode = ice_aqc_opc_add_sw_rules;
4565 opcode = ice_aqc_opc_remove_sw_rules;
4567 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4569 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4570 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4573 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4575 if (f_info.flag & ICE_FLTR_TX) {
4576 pi->dflt_tx_vsi_num = hw_vsi_id;
4577 pi->dflt_tx_vsi_rule_id = index;
4578 } else if (f_info.flag & ICE_FLTR_RX) {
4579 pi->dflt_rx_vsi_num = hw_vsi_id;
4580 pi->dflt_rx_vsi_rule_id = index;
4583 if (f_info.flag & ICE_FLTR_TX) {
4584 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4585 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4586 } else if (f_info.flag & ICE_FLTR_RX) {
4587 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4588 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4593 ice_free(hw, s_rule);
4598 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4599 * @list_head: head of rule list
4600 * @f_info: rule information
4602 * Helper function to search for a unicast rule entry - this is to be used
4603 * to remove unicast MAC filter that is not shared with other VSIs on the
4606 * Returns pointer to entry storing the rule if found
4608 static struct ice_fltr_mgmt_list_entry *
4609 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4610 struct ice_fltr_info *f_info)
4612 struct ice_fltr_mgmt_list_entry *list_itr;
4614 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4616 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4617 sizeof(f_info->l_data)) &&
4618 f_info->fwd_id.hw_vsi_id ==
4619 list_itr->fltr_info.fwd_id.hw_vsi_id &&
4620 f_info->flag == list_itr->fltr_info.flag)
4627 * ice_remove_mac_rule - remove a MAC based filter rule
4628 * @hw: pointer to the hardware structure
4629 * @m_list: list of MAC addresses and forwarding information
4630 * @recp_list: list from which function remove MAC address
4632 * This function removes either a MAC filter rule or a specific VSI from a
4633 * VSI list for a multicast MAC address.
4635 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4636 * ice_add_mac. Caller should be aware that this call will only work if all
4637 * the entries passed into m_list were added previously. It will not attempt to
4638 * do a partial remove of entries that were found.
4640 static enum ice_status
4641 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4642 struct ice_sw_recipe *recp_list)
4644 struct ice_fltr_list_entry *list_itr, *tmp;
4645 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4648 return ICE_ERR_PARAM;
4650 rule_lock = &recp_list->filt_rule_lock;
4651 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4653 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4654 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4657 if (l_type != ICE_SW_LKUP_MAC)
4658 return ICE_ERR_PARAM;
4660 vsi_handle = list_itr->fltr_info.vsi_handle;
4661 if (!ice_is_vsi_valid(hw, vsi_handle))
4662 return ICE_ERR_PARAM;
4664 list_itr->fltr_info.fwd_id.hw_vsi_id =
4665 ice_get_hw_vsi_num(hw, vsi_handle);
4666 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4667 /* Don't remove the unicast address that belongs to
4668 * another VSI on the switch, since it is not being
4671 ice_acquire_lock(rule_lock);
4672 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4673 &list_itr->fltr_info)) {
4674 ice_release_lock(rule_lock);
4675 return ICE_ERR_DOES_NOT_EXIST;
4677 ice_release_lock(rule_lock);
4679 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4681 if (list_itr->status)
4682 return list_itr->status;
4688 * ice_remove_mac - remove a MAC address based filter rule
4689 * @hw: pointer to the hardware structure
4690 * @m_list: list of MAC addresses and forwarding information
4693 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4695 struct ice_sw_recipe *recp_list;
4697 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4698 return ice_remove_mac_rule(hw, m_list, recp_list);
4702 * ice_remove_vlan_rule - Remove VLAN based filter rule
4703 * @hw: pointer to the hardware structure
4704 * @v_list: list of VLAN entries and forwarding information
4705 * @recp_list: list from which function remove VLAN
4707 static enum ice_status
4708 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4709 struct ice_sw_recipe *recp_list)
4711 struct ice_fltr_list_entry *v_list_itr, *tmp;
4713 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4715 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4717 if (l_type != ICE_SW_LKUP_VLAN)
4718 return ICE_ERR_PARAM;
4719 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4721 if (v_list_itr->status)
4722 return v_list_itr->status;
4728 * ice_remove_vlan - remove a VLAN address based filter rule
4729 * @hw: pointer to the hardware structure
4730 * @v_list: list of VLAN and forwarding information
4734 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4736 struct ice_sw_recipe *recp_list;
4739 return ICE_ERR_PARAM;
4741 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4742 return ice_remove_vlan_rule(hw, v_list, recp_list);
4746 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4747 * @hw: pointer to the hardware structure
4748 * @v_list: list of MAC VLAN entries and forwarding information
4749 * @recp_list: list from which function remove MAC VLAN
4751 static enum ice_status
4752 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4753 struct ice_sw_recipe *recp_list)
4755 struct ice_fltr_list_entry *v_list_itr, *tmp;
4757 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4758 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4760 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4762 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4763 return ICE_ERR_PARAM;
4764 v_list_itr->status =
4765 ice_remove_rule_internal(hw, recp_list,
4767 if (v_list_itr->status)
4768 return v_list_itr->status;
4774 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4775 * @hw: pointer to the hardware structure
4776 * @mv_list: list of MAC VLAN and forwarding information
4779 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4781 struct ice_sw_recipe *recp_list;
4783 if (!mv_list || !hw)
4784 return ICE_ERR_PARAM;
4786 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4787 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4791 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4792 * @fm_entry: filter entry to inspect
4793 * @vsi_handle: VSI handle to compare with filter info
4796 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4798 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4799 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4800 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4801 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4806 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4807 * @hw: pointer to the hardware structure
4808 * @vsi_handle: VSI handle to remove filters from
4809 * @vsi_list_head: pointer to the list to add entry to
4810 * @fi: pointer to fltr_info of filter entry to copy & add
4812 * Helper function, used when creating a list of filters to remove from
4813 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4814 * original filter entry, with the exception of fltr_info.fltr_act and
4815 * fltr_info.fwd_id fields. These are set such that later logic can
4816 * extract which VSI to remove the fltr from, and pass on that information.
4818 static enum ice_status
4819 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4820 struct LIST_HEAD_TYPE *vsi_list_head,
4821 struct ice_fltr_info *fi)
4823 struct ice_fltr_list_entry *tmp;
4825 /* this memory is freed up in the caller function
4826 * once filters for this VSI are removed
4828 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4830 return ICE_ERR_NO_MEMORY;
4832 tmp->fltr_info = *fi;
4834 /* Overwrite these fields to indicate which VSI to remove filter from,
4835 * so find and remove logic can extract the information from the
4836 * list entries. Note that original entries will still have proper
4839 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4840 tmp->fltr_info.vsi_handle = vsi_handle;
4841 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4843 LIST_ADD(&tmp->list_entry, vsi_list_head);
4849 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4850 * @hw: pointer to the hardware structure
4851 * @vsi_handle: VSI handle to remove filters from
4852 * @lkup_list_head: pointer to the list that has certain lookup type filters
4853 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4855 * Locates all filters in lkup_list_head that are used by the given VSI,
4856 * and adds COPIES of those entries to vsi_list_head (intended to be used
4857 * to remove the listed filters).
4858 * Note that this means all entries in vsi_list_head must be explicitly
4859 * deallocated by the caller when done with list.
4861 static enum ice_status
4862 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4863 struct LIST_HEAD_TYPE *lkup_list_head,
4864 struct LIST_HEAD_TYPE *vsi_list_head)
4866 struct ice_fltr_mgmt_list_entry *fm_entry;
4867 enum ice_status status = ICE_SUCCESS;
4869 /* check to make sure VSI ID is valid and within boundary */
4870 if (!ice_is_vsi_valid(hw, vsi_handle))
4871 return ICE_ERR_PARAM;
4873 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4874 ice_fltr_mgmt_list_entry, list_entry) {
4875 struct ice_fltr_info *fi;
4877 fi = &fm_entry->fltr_info;
4878 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4881 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4890 * ice_determine_promisc_mask
4891 * @fi: filter info to parse
4893 * Helper function to determine which ICE_PROMISC_ mask corresponds
4894 * to given filter into.
4896 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4898 u16 vid = fi->l_data.mac_vlan.vlan_id;
4899 u8 *macaddr = fi->l_data.mac.mac_addr;
4900 bool is_tx_fltr = false;
4901 u8 promisc_mask = 0;
4903 if (fi->flag == ICE_FLTR_TX)
4906 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4907 promisc_mask |= is_tx_fltr ?
4908 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4909 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4910 promisc_mask |= is_tx_fltr ?
4911 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4912 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4913 promisc_mask |= is_tx_fltr ?
4914 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4916 promisc_mask |= is_tx_fltr ?
4917 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4919 return promisc_mask;
4923 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4924 * @hw: pointer to the hardware structure
4925 * @vsi_handle: VSI handle to retrieve info from
4926 * @promisc_mask: pointer to mask to be filled in
4927 * @vid: VLAN ID of promisc VLAN VSI
4928 * @sw: pointer to switch info struct for which function add rule
4930 static enum ice_status
4931 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4932 u16 *vid, struct ice_switch_info *sw)
4934 struct ice_fltr_mgmt_list_entry *itr;
4935 struct LIST_HEAD_TYPE *rule_head;
4936 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4938 if (!ice_is_vsi_valid(hw, vsi_handle))
4939 return ICE_ERR_PARAM;
4943 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4944 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4946 ice_acquire_lock(rule_lock);
4947 LIST_FOR_EACH_ENTRY(itr, rule_head,
4948 ice_fltr_mgmt_list_entry, list_entry) {
4949 /* Continue if this filter doesn't apply to this VSI or the
4950 * VSI ID is not in the VSI map for this filter
4952 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4955 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4957 ice_release_lock(rule_lock);
4963 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4964 * @hw: pointer to the hardware structure
4965 * @vsi_handle: VSI handle to retrieve info from
4966 * @promisc_mask: pointer to mask to be filled in
4967 * @vid: VLAN ID of promisc VLAN VSI
4970 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4973 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4974 vid, hw->switch_info);
4978 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4979 * @hw: pointer to the hardware structure
4980 * @vsi_handle: VSI handle to retrieve info from
4981 * @promisc_mask: pointer to mask to be filled in
4982 * @vid: VLAN ID of promisc VLAN VSI
4983 * @sw: pointer to switch info struct for which function add rule
4985 static enum ice_status
4986 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4987 u16 *vid, struct ice_switch_info *sw)
4989 struct ice_fltr_mgmt_list_entry *itr;
4990 struct LIST_HEAD_TYPE *rule_head;
4991 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4993 if (!ice_is_vsi_valid(hw, vsi_handle))
4994 return ICE_ERR_PARAM;
4998 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4999 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5001 ice_acquire_lock(rule_lock);
5002 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5004 /* Continue if this filter doesn't apply to this VSI or the
5005 * VSI ID is not in the VSI map for this filter
5007 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5010 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5012 ice_release_lock(rule_lock);
5018 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5019 * @hw: pointer to the hardware structure
5020 * @vsi_handle: VSI handle to retrieve info from
5021 * @promisc_mask: pointer to mask to be filled in
5022 * @vid: VLAN ID of promisc VLAN VSI
5025 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5028 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5029 vid, hw->switch_info);
5033 * ice_remove_promisc - Remove promisc based filter rules
5034 * @hw: pointer to the hardware structure
5035 * @recp_id: recipe ID for which the rule needs to removed
5036 * @v_list: list of promisc entries
5038 static enum ice_status
5039 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5040 struct LIST_HEAD_TYPE *v_list)
5042 struct ice_fltr_list_entry *v_list_itr, *tmp;
5043 struct ice_sw_recipe *recp_list;
5045 recp_list = &hw->switch_info->recp_list[recp_id];
5046 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5048 v_list_itr->status =
5049 ice_remove_rule_internal(hw, recp_list, v_list_itr);
5050 if (v_list_itr->status)
5051 return v_list_itr->status;
5057 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5058 * @hw: pointer to the hardware structure
5059 * @vsi_handle: VSI handle to clear mode
5060 * @promisc_mask: mask of promiscuous config bits to clear
5061 * @vid: VLAN ID to clear VLAN promiscuous
5062 * @sw: pointer to switch info struct for which function add rule
5064 static enum ice_status
5065 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5066 u16 vid, struct ice_switch_info *sw)
5068 struct ice_fltr_list_entry *fm_entry, *tmp;
5069 struct LIST_HEAD_TYPE remove_list_head;
5070 struct ice_fltr_mgmt_list_entry *itr;
5071 struct LIST_HEAD_TYPE *rule_head;
5072 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5073 enum ice_status status = ICE_SUCCESS;
5076 if (!ice_is_vsi_valid(hw, vsi_handle))
5077 return ICE_ERR_PARAM;
5079 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5080 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5082 recipe_id = ICE_SW_LKUP_PROMISC;
5084 rule_head = &sw->recp_list[recipe_id].filt_rules;
5085 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5087 INIT_LIST_HEAD(&remove_list_head);
5089 ice_acquire_lock(rule_lock);
5090 LIST_FOR_EACH_ENTRY(itr, rule_head,
5091 ice_fltr_mgmt_list_entry, list_entry) {
5092 struct ice_fltr_info *fltr_info;
5093 u8 fltr_promisc_mask = 0;
5095 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5097 fltr_info = &itr->fltr_info;
5099 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5100 vid != fltr_info->l_data.mac_vlan.vlan_id)
5103 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5105 /* Skip if filter is not completely specified by given mask */
5106 if (fltr_promisc_mask & ~promisc_mask)
5109 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5113 ice_release_lock(rule_lock);
5114 goto free_fltr_list;
5117 ice_release_lock(rule_lock);
5119 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5122 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5123 ice_fltr_list_entry, list_entry) {
5124 LIST_DEL(&fm_entry->list_entry);
5125 ice_free(hw, fm_entry);
5132 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5133 * @hw: pointer to the hardware structure
5134 * @vsi_handle: VSI handle to clear mode
5135 * @promisc_mask: mask of promiscuous config bits to clear
5136 * @vid: VLAN ID to clear VLAN promiscuous
5139 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5140 u8 promisc_mask, u16 vid)
5142 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5143 vid, hw->switch_info);
5147 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5148 * @hw: pointer to the hardware structure
5149 * @vsi_handle: VSI handle to configure
5150 * @promisc_mask: mask of promiscuous config bits
5151 * @vid: VLAN ID to set VLAN promiscuous
5152 * @lport: logical port number to configure promisc mode
5153 * @sw: pointer to switch info struct for which function add rule
5155 static enum ice_status
5156 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5157 u16 vid, u8 lport, struct ice_switch_info *sw)
5159 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5160 struct ice_fltr_list_entry f_list_entry;
5161 struct ice_fltr_info new_fltr;
5162 enum ice_status status = ICE_SUCCESS;
5168 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5170 if (!ice_is_vsi_valid(hw, vsi_handle))
5171 return ICE_ERR_PARAM;
5172 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5174 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5176 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5177 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5178 new_fltr.l_data.mac_vlan.vlan_id = vid;
5179 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5181 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5182 recipe_id = ICE_SW_LKUP_PROMISC;
5185 /* Separate filters must be set for each direction/packet type
5186 * combination, so we will loop over the mask value, store the
5187 * individual type, and clear it out in the input mask as it
5190 while (promisc_mask) {
5191 struct ice_sw_recipe *recp_list;
5197 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5198 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5199 pkt_type = UCAST_FLTR;
5200 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5201 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5202 pkt_type = UCAST_FLTR;
5204 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5205 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5206 pkt_type = MCAST_FLTR;
5207 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5208 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5209 pkt_type = MCAST_FLTR;
5211 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5212 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5213 pkt_type = BCAST_FLTR;
5214 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5215 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5216 pkt_type = BCAST_FLTR;
5220 /* Check for VLAN promiscuous flag */
5221 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5222 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5223 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5224 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5228 /* Set filter DA based on packet type */
5229 mac_addr = new_fltr.l_data.mac.mac_addr;
5230 if (pkt_type == BCAST_FLTR) {
5231 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5232 } else if (pkt_type == MCAST_FLTR ||
5233 pkt_type == UCAST_FLTR) {
5234 /* Use the dummy ether header DA */
5235 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5236 ICE_NONDMA_TO_NONDMA);
5237 if (pkt_type == MCAST_FLTR)
5238 mac_addr[0] |= 0x1; /* Set multicast bit */
5241 /* Need to reset this to zero for all iterations */
5244 new_fltr.flag |= ICE_FLTR_TX;
5245 new_fltr.src = hw_vsi_id;
5247 new_fltr.flag |= ICE_FLTR_RX;
5248 new_fltr.src = lport;
5251 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5252 new_fltr.vsi_handle = vsi_handle;
5253 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5254 f_list_entry.fltr_info = new_fltr;
5255 recp_list = &sw->recp_list[recipe_id];
5257 status = ice_add_rule_internal(hw, recp_list, lport,
5259 if (status != ICE_SUCCESS)
5260 goto set_promisc_exit;
5268 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5269 * @hw: pointer to the hardware structure
5270 * @vsi_handle: VSI handle to configure
5271 * @promisc_mask: mask of promiscuous config bits
5272 * @vid: VLAN ID to set VLAN promiscuous
5275 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5278 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5279 hw->port_info->lport,
5284 * _ice_set_vlan_vsi_promisc
5285 * @hw: pointer to the hardware structure
5286 * @vsi_handle: VSI handle to configure
5287 * @promisc_mask: mask of promiscuous config bits
5288 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5289 * @lport: logical port number to configure promisc mode
5290 * @sw: pointer to switch info struct for which function add rule
5292 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5294 static enum ice_status
5295 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5296 bool rm_vlan_promisc, u8 lport,
5297 struct ice_switch_info *sw)
5299 struct ice_fltr_list_entry *list_itr, *tmp;
5300 struct LIST_HEAD_TYPE vsi_list_head;
5301 struct LIST_HEAD_TYPE *vlan_head;
5302 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5303 enum ice_status status;
5306 INIT_LIST_HEAD(&vsi_list_head);
5307 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5308 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5309 ice_acquire_lock(vlan_lock);
5310 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5312 ice_release_lock(vlan_lock);
5314 goto free_fltr_list;
5316 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5318 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5319 if (rm_vlan_promisc)
5320 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5324 status = _ice_set_vsi_promisc(hw, vsi_handle,
5325 promisc_mask, vlan_id,
5332 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5333 ice_fltr_list_entry, list_entry) {
5334 LIST_DEL(&list_itr->list_entry);
5335 ice_free(hw, list_itr);
5341 * ice_set_vlan_vsi_promisc
5342 * @hw: pointer to the hardware structure
5343 * @vsi_handle: VSI handle to configure
5344 * @promisc_mask: mask of promiscuous config bits
5345 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5347 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5350 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5351 bool rm_vlan_promisc)
5353 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5354 rm_vlan_promisc, hw->port_info->lport,
5359 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5360 * @hw: pointer to the hardware structure
5361 * @vsi_handle: VSI handle to remove filters from
5362 * @recp_list: recipe list from which function remove fltr
5363 * @lkup: switch rule filter lookup type
5366 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5367 struct ice_sw_recipe *recp_list,
5368 enum ice_sw_lkup_type lkup)
5370 struct ice_fltr_list_entry *fm_entry;
5371 struct LIST_HEAD_TYPE remove_list_head;
5372 struct LIST_HEAD_TYPE *rule_head;
5373 struct ice_fltr_list_entry *tmp;
5374 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5375 enum ice_status status;
5377 INIT_LIST_HEAD(&remove_list_head);
5378 rule_lock = &recp_list[lkup].filt_rule_lock;
5379 rule_head = &recp_list[lkup].filt_rules;
5380 ice_acquire_lock(rule_lock);
5381 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5383 ice_release_lock(rule_lock);
5388 case ICE_SW_LKUP_MAC:
5389 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5391 case ICE_SW_LKUP_VLAN:
5392 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5394 case ICE_SW_LKUP_PROMISC:
5395 case ICE_SW_LKUP_PROMISC_VLAN:
5396 ice_remove_promisc(hw, lkup, &remove_list_head);
5398 case ICE_SW_LKUP_MAC_VLAN:
5399 ice_remove_mac_vlan(hw, &remove_list_head);
5401 case ICE_SW_LKUP_ETHERTYPE:
5402 case ICE_SW_LKUP_ETHERTYPE_MAC:
5403 ice_remove_eth_mac(hw, &remove_list_head);
5405 case ICE_SW_LKUP_DFLT:
5406 ice_debug(hw, ICE_DBG_SW,
5407 "Remove filters for this lookup type hasn't been implemented yet\n");
5409 case ICE_SW_LKUP_LAST:
5410 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5414 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5415 ice_fltr_list_entry, list_entry) {
5416 LIST_DEL(&fm_entry->list_entry);
5417 ice_free(hw, fm_entry);
5422 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5423 * @hw: pointer to the hardware structure
5424 * @vsi_handle: VSI handle to remove filters from
5425 * @sw: pointer to switch info struct
5428 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5429 struct ice_switch_info *sw)
5431 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5433 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5434 sw->recp_list, ICE_SW_LKUP_MAC);
5435 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5436 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5437 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5438 sw->recp_list, ICE_SW_LKUP_PROMISC);
5439 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5440 sw->recp_list, ICE_SW_LKUP_VLAN);
5441 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5442 sw->recp_list, ICE_SW_LKUP_DFLT);
5443 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5444 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5445 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5446 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5447 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5448 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5452 * ice_remove_vsi_fltr - Remove all filters for a VSI
5453 * @hw: pointer to the hardware structure
5454 * @vsi_handle: VSI handle to remove filters from
5456 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5458 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5462 * ice_alloc_res_cntr - allocating resource counter
5463 * @hw: pointer to the hardware structure
5464 * @type: type of resource
5465 * @alloc_shared: if set it is shared else dedicated
5466 * @num_items: number of entries requested for FD resource type
5467 * @counter_id: counter index returned by AQ call
5470 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5473 struct ice_aqc_alloc_free_res_elem *buf;
5474 enum ice_status status;
5477 /* Allocate resource */
5478 buf_len = ice_struct_size(buf, elem, 1);
5479 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5481 return ICE_ERR_NO_MEMORY;
5483 buf->num_elems = CPU_TO_LE16(num_items);
5484 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5485 ICE_AQC_RES_TYPE_M) | alloc_shared);
5487 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5488 ice_aqc_opc_alloc_res, NULL);
5492 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5500 * ice_free_res_cntr - free resource counter
5501 * @hw: pointer to the hardware structure
5502 * @type: type of resource
5503 * @alloc_shared: if set it is shared else dedicated
5504 * @num_items: number of entries to be freed for FD resource type
5505 * @counter_id: counter ID resource which needs to be freed
5508 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5511 struct ice_aqc_alloc_free_res_elem *buf;
5512 enum ice_status status;
5516 buf_len = ice_struct_size(buf, elem, 1);
5517 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5519 return ICE_ERR_NO_MEMORY;
5521 buf->num_elems = CPU_TO_LE16(num_items);
5522 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5523 ICE_AQC_RES_TYPE_M) | alloc_shared);
5524 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5526 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5527 ice_aqc_opc_free_res, NULL);
5529 ice_debug(hw, ICE_DBG_SW,
5530 "counter resource could not be freed\n");
5537 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5538 * @hw: pointer to the hardware structure
5539 * @counter_id: returns counter index
5541 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5543 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5544 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5549 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5550 * @hw: pointer to the hardware structure
5551 * @counter_id: counter index to be freed
5553 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5555 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5556 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5561 * ice_alloc_res_lg_act - add large action resource
5562 * @hw: pointer to the hardware structure
5563 * @l_id: large action ID to fill it in
5564 * @num_acts: number of actions to hold with a large action entry
5566 static enum ice_status
5567 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5569 struct ice_aqc_alloc_free_res_elem *sw_buf;
5570 enum ice_status status;
5573 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5574 return ICE_ERR_PARAM;
5576 /* Allocate resource for large action */
5577 buf_len = ice_struct_size(sw_buf, elem, 1);
5578 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5580 return ICE_ERR_NO_MEMORY;
5582 sw_buf->num_elems = CPU_TO_LE16(1);
5584 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5585 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5586 * If num_acts is greater than 2, then use
5587 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5588 * The num_acts cannot exceed 4. This was ensured at the
5589 * beginning of the function.
5592 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5593 else if (num_acts == 2)
5594 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5596 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5598 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5599 ice_aqc_opc_alloc_res, NULL);
5601 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5603 ice_free(hw, sw_buf);
5608 * ice_add_mac_with_sw_marker - add filter with sw marker
5609 * @hw: pointer to the hardware structure
5610 * @f_info: filter info structure containing the MAC filter information
5611 * @sw_marker: sw marker to tag the Rx descriptor with
5614 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5617 struct ice_fltr_mgmt_list_entry *m_entry;
5618 struct ice_fltr_list_entry fl_info;
5619 struct ice_sw_recipe *recp_list;
5620 struct LIST_HEAD_TYPE l_head;
5621 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5622 enum ice_status ret;
5626 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5627 return ICE_ERR_PARAM;
5629 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5630 return ICE_ERR_PARAM;
5632 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5633 return ICE_ERR_PARAM;
5635 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5636 return ICE_ERR_PARAM;
5637 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5639 /* Add filter if it doesn't exist so then the adding of large
5640 * action always results in update
5643 INIT_LIST_HEAD(&l_head);
5644 fl_info.fltr_info = *f_info;
5645 LIST_ADD(&fl_info.list_entry, &l_head);
5647 entry_exists = false;
5648 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5649 hw->port_info->lport);
5650 if (ret == ICE_ERR_ALREADY_EXISTS)
5651 entry_exists = true;
5655 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5656 rule_lock = &recp_list->filt_rule_lock;
5657 ice_acquire_lock(rule_lock);
5658 /* Get the book keeping entry for the filter */
5659 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5663 /* If counter action was enabled for this rule then don't enable
5664 * sw marker large action
5666 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5667 ret = ICE_ERR_PARAM;
5671 /* if same marker was added before */
5672 if (m_entry->sw_marker_id == sw_marker) {
5673 ret = ICE_ERR_ALREADY_EXISTS;
5677 /* Allocate a hardware table entry to hold large act. Three actions
5678 * for marker based large action
5680 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5684 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5687 /* Update the switch rule to add the marker action */
5688 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5690 ice_release_lock(rule_lock);
5695 ice_release_lock(rule_lock);
5696 /* only remove entry if it did not exist previously */
5698 ret = ice_remove_mac(hw, &l_head);
5704 * ice_add_mac_with_counter - add filter with counter enabled
5705 * @hw: pointer to the hardware structure
5706 * @f_info: pointer to filter info structure containing the MAC filter
5710 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5712 struct ice_fltr_mgmt_list_entry *m_entry;
5713 struct ice_fltr_list_entry fl_info;
5714 struct ice_sw_recipe *recp_list;
5715 struct LIST_HEAD_TYPE l_head;
5716 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5717 enum ice_status ret;
5722 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5723 return ICE_ERR_PARAM;
5725 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5726 return ICE_ERR_PARAM;
5728 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5729 return ICE_ERR_PARAM;
5730 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5731 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5733 entry_exist = false;
5735 rule_lock = &recp_list->filt_rule_lock;
5737 /* Add filter if it doesn't exist so then the adding of large
5738 * action always results in update
5740 INIT_LIST_HEAD(&l_head);
5742 fl_info.fltr_info = *f_info;
5743 LIST_ADD(&fl_info.list_entry, &l_head);
5745 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5746 hw->port_info->lport);
5747 if (ret == ICE_ERR_ALREADY_EXISTS)
5752 ice_acquire_lock(rule_lock);
5753 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5755 ret = ICE_ERR_BAD_PTR;
5759 /* Don't enable counter for a filter for which sw marker was enabled */
5760 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5761 ret = ICE_ERR_PARAM;
5765 /* If a counter was already enabled then don't need to add again */
5766 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5767 ret = ICE_ERR_ALREADY_EXISTS;
5771 /* Allocate a hardware table entry to VLAN counter */
5772 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5776 /* Allocate a hardware table entry to hold large act. Two actions for
5777 * counter based large action
5779 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5783 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5786 /* Update the switch rule to add the counter action */
5787 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5789 ice_release_lock(rule_lock);
5794 ice_release_lock(rule_lock);
5795 /* only remove entry if it did not exist previously */
5797 ret = ice_remove_mac(hw, &l_head);
5802 /* This is mapping table entry that maps every word within a given protocol
5803 * structure to the real byte offset as per the specification of that
5805 * for example dst address is 3 words in ethertype header and corresponding
5806 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5807 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5808 * matching entry describing its field. This needs to be updated if new
5809 * structure is added to that union.
5811 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5812 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5813 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5814 { ICE_ETYPE_OL, { 0 } },
5815 { ICE_VLAN_OFOS, { 0, 2 } },
5816 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5817 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5818 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5819 26, 28, 30, 32, 34, 36, 38 } },
5820 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5821 26, 28, 30, 32, 34, 36, 38 } },
5822 { ICE_TCP_IL, { 0, 2 } },
5823 { ICE_UDP_OF, { 0, 2 } },
5824 { ICE_UDP_ILOS, { 0, 2 } },
5825 { ICE_SCTP_IL, { 0, 2 } },
5826 { ICE_VXLAN, { 8, 10, 12, 14 } },
5827 { ICE_GENEVE, { 8, 10, 12, 14 } },
5828 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5829 { ICE_NVGRE, { 0, 2, 4, 6 } },
5830 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5831 { ICE_PPPOE, { 0, 2, 4, 6 } },
5832 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5833 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5834 { ICE_ESP, { 0, 2, 4, 6 } },
5835 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5836 { ICE_NAT_T, { 8, 10, 12, 14 } },
5837 { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
5840 /* The following table describes preferred grouping of recipes.
5841 * If a recipe that needs to be programmed is a superset or matches one of the
5842 * following combinations, then the recipe needs to be chained as per the
5846 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5847 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5848 { ICE_MAC_IL, ICE_MAC_IL_HW },
5849 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5850 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5851 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5852 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5853 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5854 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5855 { ICE_TCP_IL, ICE_TCP_IL_HW },
5856 { ICE_UDP_OF, ICE_UDP_OF_HW },
5857 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5858 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5859 { ICE_VXLAN, ICE_UDP_OF_HW },
5860 { ICE_GENEVE, ICE_UDP_OF_HW },
5861 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5862 { ICE_NVGRE, ICE_GRE_OF_HW },
5863 { ICE_GTP, ICE_UDP_OF_HW },
5864 { ICE_PPPOE, ICE_PPPOE_HW },
5865 { ICE_PFCP, ICE_UDP_ILOS_HW },
5866 { ICE_L2TPV3, ICE_L2TPV3_HW },
5867 { ICE_ESP, ICE_ESP_HW },
5868 { ICE_AH, ICE_AH_HW },
5869 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5870 { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
5874 * ice_find_recp - find a recipe
5875 * @hw: pointer to the hardware structure
5876 * @lkup_exts: extension sequence to match
5878 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5880 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5881 enum ice_sw_tunnel_type tun_type)
5883 bool refresh_required = true;
5884 struct ice_sw_recipe *recp;
5887 /* Walk through existing recipes to find a match */
5888 recp = hw->switch_info->recp_list;
5889 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5890 /* If recipe was not created for this ID, in SW bookkeeping,
5891 * check if FW has an entry for this recipe. If the FW has an
5892 * entry update it in our SW bookkeeping and continue with the
5895 if (!recp[i].recp_created)
5896 if (ice_get_recp_frm_fw(hw,
5897 hw->switch_info->recp_list, i,
5901 /* Skip inverse action recipes */
5902 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5903 ICE_AQ_RECIPE_ACT_INV_ACT)
5906 /* if number of words we are looking for match */
5907 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5908 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5909 struct ice_fv_word *be = lkup_exts->fv_words;
5910 u16 *cr = recp[i].lkup_exts.field_mask;
5911 u16 *de = lkup_exts->field_mask;
5915 /* ar, cr, and qr are related to the recipe words, while
5916 * be, de, and pe are related to the lookup words
5918 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5919 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5921 if (ar[qr].off == be[pe].off &&
5922 ar[qr].prot_id == be[pe].prot_id &&
5924 /* Found the "pe"th word in the
5929 /* After walking through all the words in the
5930 * "i"th recipe if "p"th word was not found then
5931 * this recipe is not what we are looking for.
5932 * So break out from this loop and try the next
5935 if (qr >= recp[i].lkup_exts.n_val_words) {
5940 /* If for "i"th recipe the found was never set to false
5941 * then it means we found our match
5943 if (tun_type == recp[i].tun_type && found)
5944 return i; /* Return the recipe ID */
5947 return ICE_MAX_NUM_RECIPES;
5951 * ice_prot_type_to_id - get protocol ID from protocol type
5952 * @type: protocol type
5953 * @id: pointer to variable that will receive the ID
5955 * Returns true if found, false otherwise
5957 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5961 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5962 if (ice_prot_id_tbl[i].type == type) {
5963 *id = ice_prot_id_tbl[i].protocol_id;
5970 * ice_find_valid_words - count valid words
5971 * @rule: advanced rule with lookup information
5972 * @lkup_exts: byte offset extractions of the words that are valid
5974 * calculate valid words in a lookup rule using mask value
5977 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5978 struct ice_prot_lkup_ext *lkup_exts)
5980 u8 j, word, prot_id, ret_val;
5982 if (!ice_prot_type_to_id(rule->type, &prot_id))
5985 word = lkup_exts->n_val_words;
5987 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5988 if (((u16 *)&rule->m_u)[j] &&
5989 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5990 /* No more space to accommodate */
5991 if (word >= ICE_MAX_CHAIN_WORDS)
5993 lkup_exts->fv_words[word].off =
5994 ice_prot_ext[rule->type].offs[j];
5995 lkup_exts->fv_words[word].prot_id =
5996 ice_prot_id_tbl[rule->type].protocol_id;
5997 lkup_exts->field_mask[word] =
5998 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6002 ret_val = word - lkup_exts->n_val_words;
6003 lkup_exts->n_val_words = word;
6009 * ice_create_first_fit_recp_def - Create a recipe grouping
6010 * @hw: pointer to the hardware structure
6011 * @lkup_exts: an array of protocol header extractions
6012 * @rg_list: pointer to a list that stores new recipe groups
6013 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6015 * Using first fit algorithm, take all the words that are still not done
6016 * and start grouping them in 4-word groups. Each group makes up one
6019 static enum ice_status
6020 ice_create_first_fit_recp_def(struct ice_hw *hw,
6021 struct ice_prot_lkup_ext *lkup_exts,
6022 struct LIST_HEAD_TYPE *rg_list,
6025 struct ice_pref_recipe_group *grp = NULL;
6030 if (!lkup_exts->n_val_words) {
6031 struct ice_recp_grp_entry *entry;
6033 entry = (struct ice_recp_grp_entry *)
6034 ice_malloc(hw, sizeof(*entry));
6036 return ICE_ERR_NO_MEMORY;
6037 LIST_ADD(&entry->l_entry, rg_list);
6038 grp = &entry->r_group;
6040 grp->n_val_pairs = 0;
6043 /* Walk through every word in the rule to check if it is not done. If so
6044 * then this word needs to be part of a new recipe.
6046 for (j = 0; j < lkup_exts->n_val_words; j++)
6047 if (!ice_is_bit_set(lkup_exts->done, j)) {
6049 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6050 struct ice_recp_grp_entry *entry;
6052 entry = (struct ice_recp_grp_entry *)
6053 ice_malloc(hw, sizeof(*entry));
6055 return ICE_ERR_NO_MEMORY;
6056 LIST_ADD(&entry->l_entry, rg_list);
6057 grp = &entry->r_group;
6061 grp->pairs[grp->n_val_pairs].prot_id =
6062 lkup_exts->fv_words[j].prot_id;
6063 grp->pairs[grp->n_val_pairs].off =
6064 lkup_exts->fv_words[j].off;
6065 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6073 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6074 * @hw: pointer to the hardware structure
6075 * @fv_list: field vector with the extraction sequence information
6076 * @rg_list: recipe groupings with protocol-offset pairs
6078 * Helper function to fill in the field vector indices for protocol-offset
6079 * pairs. These indexes are then ultimately programmed into a recipe.
6081 static enum ice_status
6082 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6083 struct LIST_HEAD_TYPE *rg_list)
6085 struct ice_sw_fv_list_entry *fv;
6086 struct ice_recp_grp_entry *rg;
6087 struct ice_fv_word *fv_ext;
6089 if (LIST_EMPTY(fv_list))
6092 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6093 fv_ext = fv->fv_ptr->ew;
6095 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6098 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6099 struct ice_fv_word *pr;
6104 pr = &rg->r_group.pairs[i];
6105 mask = rg->r_group.mask[i];
6107 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6108 if (fv_ext[j].prot_id == pr->prot_id &&
6109 fv_ext[j].off == pr->off) {
6112 /* Store index of field vector */
6114 rg->fv_mask[i] = mask;
6118 /* Protocol/offset could not be found, caller gave an
6122 return ICE_ERR_PARAM;
6130 * ice_find_free_recp_res_idx - find free result indexes for recipe
6131 * @hw: pointer to hardware structure
6132 * @profiles: bitmap of profiles that will be associated with the new recipe
6133 * @free_idx: pointer to variable to receive the free index bitmap
6135 * The algorithm used here is:
6136 * 1. When creating a new recipe, create a set P which contains all
6137 * Profiles that will be associated with our new recipe
6139 * 2. For each Profile p in set P:
6140 * a. Add all recipes associated with Profile p into set R
6141 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6142 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6143 * i. Or just assume they all have the same possible indexes:
6145 * i.e., PossibleIndexes = 0x0000F00000000000
6147 * 3. For each Recipe r in set R:
6148 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6149 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6151 * FreeIndexes will contain the bits indicating the indexes free for use,
6152 * then the code needs to update the recipe[r].used_result_idx_bits to
6153 * indicate which indexes were selected for use by this recipe.
6156 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6157 ice_bitmap_t *free_idx)
6159 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6160 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6161 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6164 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6165 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6166 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6167 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6169 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6171 /* For each profile we are going to associate the recipe with, add the
6172 * recipes that are associated with that profile. This will give us
6173 * the set of recipes that our recipe may collide with. Also, determine
6174 * what possible result indexes are usable given this set of profiles.
6176 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6177 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6178 ICE_MAX_NUM_RECIPES);
6179 ice_and_bitmap(possible_idx, possible_idx,
6180 hw->switch_info->prof_res_bm[bit],
6184 /* For each recipe that our new recipe may collide with, determine
6185 * which indexes have been used.
6187 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6188 ice_or_bitmap(used_idx, used_idx,
6189 hw->switch_info->recp_list[bit].res_idxs,
6192 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6194 /* return number of free indexes */
6195 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6199 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6200 * @hw: pointer to hardware structure
6201 * @rm: recipe management list entry
6202 * @profiles: bitmap of profiles that will be associated.
6204 static enum ice_status
6205 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6206 ice_bitmap_t *profiles)
6208 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6209 struct ice_aqc_recipe_data_elem *tmp;
6210 struct ice_aqc_recipe_data_elem *buf;
6211 struct ice_recp_grp_entry *entry;
6212 enum ice_status status;
6218 /* When more than one recipe are required, another recipe is needed to
6219 * chain them together. Matching a tunnel metadata ID takes up one of
6220 * the match fields in the chaining recipe reducing the number of
6221 * chained recipes by one.
6223 /* check number of free result indices */
6224 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6225 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6227 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6228 free_res_idx, rm->n_grp_count);
6230 if (rm->n_grp_count > 1) {
6231 if (rm->n_grp_count > free_res_idx)
6232 return ICE_ERR_MAX_LIMIT;
6237 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6238 return ICE_ERR_MAX_LIMIT;
6240 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6241 ICE_MAX_NUM_RECIPES,
6244 return ICE_ERR_NO_MEMORY;
6246 buf = (struct ice_aqc_recipe_data_elem *)
6247 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6249 status = ICE_ERR_NO_MEMORY;
6253 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6254 recipe_count = ICE_MAX_NUM_RECIPES;
6255 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6257 if (status || recipe_count == 0)
6260 /* Allocate the recipe resources, and configure them according to the
6261 * match fields from protocol headers and extracted field vectors.
6263 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6264 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6267 status = ice_alloc_recipe(hw, &entry->rid);
6271 /* Clear the result index of the located recipe, as this will be
6272 * updated, if needed, later in the recipe creation process.
6274 tmp[0].content.result_indx = 0;
6276 buf[recps] = tmp[0];
6277 buf[recps].recipe_indx = (u8)entry->rid;
6278 /* if the recipe is a non-root recipe RID should be programmed
6279 * as 0 for the rules to be applied correctly.
6281 buf[recps].content.rid = 0;
6282 ice_memset(&buf[recps].content.lkup_indx, 0,
6283 sizeof(buf[recps].content.lkup_indx),
6286 /* All recipes use look-up index 0 to match switch ID. */
6287 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6288 buf[recps].content.mask[0] =
6289 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6290 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6293 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6294 buf[recps].content.lkup_indx[i] = 0x80;
6295 buf[recps].content.mask[i] = 0;
6298 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6299 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6300 buf[recps].content.mask[i + 1] =
6301 CPU_TO_LE16(entry->fv_mask[i]);
6304 if (rm->n_grp_count > 1) {
6305 /* Checks to see if there really is a valid result index
6308 if (chain_idx >= ICE_MAX_FV_WORDS) {
6309 ice_debug(hw, ICE_DBG_SW,
6310 "No chain index available\n");
6311 status = ICE_ERR_MAX_LIMIT;
6315 entry->chain_idx = chain_idx;
6316 buf[recps].content.result_indx =
6317 ICE_AQ_RECIPE_RESULT_EN |
6318 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6319 ICE_AQ_RECIPE_RESULT_DATA_M);
6320 ice_clear_bit(chain_idx, result_idx_bm);
6321 chain_idx = ice_find_first_bit(result_idx_bm,
6325 /* fill recipe dependencies */
6326 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6327 ICE_MAX_NUM_RECIPES);
6328 ice_set_bit(buf[recps].recipe_indx,
6329 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6330 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6334 if (rm->n_grp_count == 1) {
6335 rm->root_rid = buf[0].recipe_indx;
6336 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6337 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6338 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6339 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6340 sizeof(buf[0].recipe_bitmap),
6341 ICE_NONDMA_TO_NONDMA);
6343 status = ICE_ERR_BAD_PTR;
6346 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6347 * the recipe which is getting created if specified
6348 * by user. Usually any advanced switch filter, which results
6349 * into new extraction sequence, ended up creating a new recipe
6350 * of type ROOT and usually recipes are associated with profiles
6351 * Switch rule referreing newly created recipe, needs to have
6352 * either/or 'fwd' or 'join' priority, otherwise switch rule
6353 * evaluation will not happen correctly. In other words, if
6354 * switch rule to be evaluated on priority basis, then recipe
6355 * needs to have priority, otherwise it will be evaluated last.
6357 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6359 struct ice_recp_grp_entry *last_chain_entry;
6362 /* Allocate the last recipe that will chain the outcomes of the
6363 * other recipes together
6365 status = ice_alloc_recipe(hw, &rid);
6369 buf[recps].recipe_indx = (u8)rid;
6370 buf[recps].content.rid = (u8)rid;
6371 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6372 /* the new entry created should also be part of rg_list to
6373 * make sure we have complete recipe
6375 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6376 sizeof(*last_chain_entry));
6377 if (!last_chain_entry) {
6378 status = ICE_ERR_NO_MEMORY;
6381 last_chain_entry->rid = rid;
6382 ice_memset(&buf[recps].content.lkup_indx, 0,
6383 sizeof(buf[recps].content.lkup_indx),
6385 /* All recipes use look-up index 0 to match switch ID. */
6386 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6387 buf[recps].content.mask[0] =
6388 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6389 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6390 buf[recps].content.lkup_indx[i] =
6391 ICE_AQ_RECIPE_LKUP_IGNORE;
6392 buf[recps].content.mask[i] = 0;
6396 /* update r_bitmap with the recp that is used for chaining */
6397 ice_set_bit(rid, rm->r_bitmap);
6398 /* this is the recipe that chains all the other recipes so it
6399 * should not have a chaining ID to indicate the same
6401 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6402 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6404 last_chain_entry->fv_idx[i] = entry->chain_idx;
6405 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6406 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6407 ice_set_bit(entry->rid, rm->r_bitmap);
6409 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6410 if (sizeof(buf[recps].recipe_bitmap) >=
6411 sizeof(rm->r_bitmap)) {
6412 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6413 sizeof(buf[recps].recipe_bitmap),
6414 ICE_NONDMA_TO_NONDMA);
6416 status = ICE_ERR_BAD_PTR;
6419 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6422 rm->root_rid = (u8)rid;
6424 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6428 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6429 ice_release_change_lock(hw);
6433 /* Every recipe that just got created add it to the recipe
6436 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6437 struct ice_switch_info *sw = hw->switch_info;
6438 bool is_root, idx_found = false;
6439 struct ice_sw_recipe *recp;
6440 u16 idx, buf_idx = 0;
6442 /* find buffer index for copying some data */
6443 for (idx = 0; idx < rm->n_grp_count; idx++)
6444 if (buf[idx].recipe_indx == entry->rid) {
6450 status = ICE_ERR_OUT_OF_RANGE;
6454 recp = &sw->recp_list[entry->rid];
6455 is_root = (rm->root_rid == entry->rid);
6456 recp->is_root = is_root;
6458 recp->root_rid = entry->rid;
6459 recp->big_recp = (is_root && rm->n_grp_count > 1);
6461 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6462 entry->r_group.n_val_pairs *
6463 sizeof(struct ice_fv_word),
6464 ICE_NONDMA_TO_NONDMA);
6466 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6467 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6469 /* Copy non-result fv index values and masks to recipe. This
6470 * call will also update the result recipe bitmask.
6472 ice_collect_result_idx(&buf[buf_idx], recp);
6474 /* for non-root recipes, also copy to the root, this allows
6475 * easier matching of a complete chained recipe
6478 ice_collect_result_idx(&buf[buf_idx],
6479 &sw->recp_list[rm->root_rid]);
6481 recp->n_ext_words = entry->r_group.n_val_pairs;
6482 recp->chain_idx = entry->chain_idx;
6483 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6484 recp->n_grp_count = rm->n_grp_count;
6485 recp->tun_type = rm->tun_type;
6486 recp->recp_created = true;
6500 * ice_create_recipe_group - creates recipe group
6501 * @hw: pointer to hardware structure
6502 * @rm: recipe management list entry
6503 * @lkup_exts: lookup elements
6505 static enum ice_status
6506 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6507 struct ice_prot_lkup_ext *lkup_exts)
6509 enum ice_status status;
6512 rm->n_grp_count = 0;
6514 /* Create recipes for words that are marked not done by packing them
6517 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6518 &rm->rg_list, &recp_count);
6520 rm->n_grp_count += recp_count;
6521 rm->n_ext_words = lkup_exts->n_val_words;
6522 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6523 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6524 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6525 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6532 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6533 * @hw: pointer to hardware structure
6534 * @lkups: lookup elements or match criteria for the advanced recipe, one
6535 * structure per protocol header
6536 * @lkups_cnt: number of protocols
6537 * @bm: bitmap of field vectors to consider
6538 * @fv_list: pointer to a list that holds the returned field vectors
6540 static enum ice_status
6541 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6542 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6544 enum ice_status status;
6551 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6553 return ICE_ERR_NO_MEMORY;
6555 for (i = 0; i < lkups_cnt; i++)
6556 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6557 status = ICE_ERR_CFG;
6561 /* Find field vectors that include all specified protocol types */
6562 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6565 ice_free(hw, prot_ids);
6570 * ice_tun_type_match_mask - determine if tun type needs a match mask
6571 * @tun_type: tunnel type
6572 * @mask: mask to be used for the tunnel
6574 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6577 case ICE_SW_TUN_VXLAN_GPE:
6578 case ICE_SW_TUN_GENEVE:
6579 case ICE_SW_TUN_VXLAN:
6580 case ICE_SW_TUN_NVGRE:
6581 case ICE_SW_TUN_UDP:
6582 case ICE_ALL_TUNNELS:
6583 *mask = ICE_TUN_FLAG_MASK;
6586 case ICE_SW_TUN_GENEVE_VLAN:
6587 case ICE_SW_TUN_VXLAN_VLAN:
6588 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6598 * ice_add_special_words - Add words that are not protocols, such as metadata
6599 * @rinfo: other information regarding the rule e.g. priority and action info
6600 * @lkup_exts: lookup word structure
6602 static enum ice_status
6603 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6604 struct ice_prot_lkup_ext *lkup_exts)
6608 /* If this is a tunneled packet, then add recipe index to match the
6609 * tunnel bit in the packet metadata flags.
6611 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6612 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6613 u8 word = lkup_exts->n_val_words++;
6615 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6616 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6617 lkup_exts->field_mask[word] = mask;
6619 return ICE_ERR_MAX_LIMIT;
6626 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6627 * @hw: pointer to hardware structure
6628 * @rinfo: other information regarding the rule e.g. priority and action info
6629 * @bm: pointer to memory for returning the bitmap of field vectors
6632 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6635 enum ice_prof_type prof_type;
6637 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6639 switch (rinfo->tun_type) {
6641 prof_type = ICE_PROF_NON_TUN;
6643 case ICE_ALL_TUNNELS:
6644 prof_type = ICE_PROF_TUN_ALL;
6646 case ICE_SW_TUN_VXLAN_GPE:
6647 case ICE_SW_TUN_GENEVE:
6648 case ICE_SW_TUN_GENEVE_VLAN:
6649 case ICE_SW_TUN_VXLAN:
6650 case ICE_SW_TUN_VXLAN_VLAN:
6651 case ICE_SW_TUN_UDP:
6652 case ICE_SW_TUN_GTP:
6653 prof_type = ICE_PROF_TUN_UDP;
6655 case ICE_SW_TUN_NVGRE:
6656 prof_type = ICE_PROF_TUN_GRE;
6658 case ICE_SW_TUN_PPPOE:
6659 prof_type = ICE_PROF_TUN_PPPOE;
6661 case ICE_SW_TUN_PPPOE_PAY:
6662 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6664 case ICE_SW_TUN_PPPOE_IPV4:
6665 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6666 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6667 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6669 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6670 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6672 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6673 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6675 case ICE_SW_TUN_PPPOE_IPV6:
6676 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6677 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6678 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6680 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6681 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6683 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6684 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6686 case ICE_SW_TUN_PROFID_IPV6_ESP:
6687 case ICE_SW_TUN_IPV6_ESP:
6688 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6690 case ICE_SW_TUN_PROFID_IPV6_AH:
6691 case ICE_SW_TUN_IPV6_AH:
6692 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6694 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6695 case ICE_SW_TUN_IPV6_L2TPV3:
6696 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6698 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6699 case ICE_SW_TUN_IPV6_NAT_T:
6700 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6702 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6703 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6705 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6706 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6708 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6709 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6711 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6712 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6714 case ICE_SW_TUN_IPV4_NAT_T:
6715 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6717 case ICE_SW_TUN_IPV4_L2TPV3:
6718 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6720 case ICE_SW_TUN_IPV4_ESP:
6721 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6723 case ICE_SW_TUN_IPV4_AH:
6724 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6726 case ICE_SW_IPV4_TCP:
6727 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6729 case ICE_SW_IPV4_UDP:
6730 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6732 case ICE_SW_IPV6_TCP:
6733 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6735 case ICE_SW_IPV6_UDP:
6736 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6738 case ICE_SW_TUN_IPV4_GTPU_IPV4:
6739 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6740 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6741 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6742 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6743 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6744 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6746 case ICE_SW_TUN_IPV6_GTPU_IPV4:
6747 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6748 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6749 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6750 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6751 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6752 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6754 case ICE_SW_TUN_IPV4_GTPU_IPV6:
6755 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6756 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6757 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6758 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6759 ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6760 ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6762 case ICE_SW_TUN_IPV6_GTPU_IPV6:
6763 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6764 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6765 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6766 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6767 ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6768 ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6770 case ICE_SW_TUN_AND_NON_TUN:
6772 prof_type = ICE_PROF_ALL;
6776 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6780 * ice_is_prof_rule - determine if rule type is a profile rule
6781 * @type: the rule type
6783 * if the rule type is a profile rule, that means that there no field value
6784 * match required, in this case just a profile hit is required.
6786 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6789 case ICE_SW_TUN_PROFID_IPV6_ESP:
6790 case ICE_SW_TUN_PROFID_IPV6_AH:
6791 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6792 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6793 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6794 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6795 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6796 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6806 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6807 * @hw: pointer to hardware structure
6808 * @lkups: lookup elements or match criteria for the advanced recipe, one
6809 * structure per protocol header
6810 * @lkups_cnt: number of protocols
6811 * @rinfo: other information regarding the rule e.g. priority and action info
6812 * @rid: return the recipe ID of the recipe created
6814 static enum ice_status
6815 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6816 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6818 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6819 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6820 struct ice_prot_lkup_ext *lkup_exts;
6821 struct ice_recp_grp_entry *r_entry;
6822 struct ice_sw_fv_list_entry *fvit;
6823 struct ice_recp_grp_entry *r_tmp;
6824 struct ice_sw_fv_list_entry *tmp;
6825 enum ice_status status = ICE_SUCCESS;
6826 struct ice_sw_recipe *rm;
6829 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6830 return ICE_ERR_PARAM;
6832 lkup_exts = (struct ice_prot_lkup_ext *)
6833 ice_malloc(hw, sizeof(*lkup_exts));
6835 return ICE_ERR_NO_MEMORY;
6837 /* Determine the number of words to be matched and if it exceeds a
6838 * recipe's restrictions
6840 for (i = 0; i < lkups_cnt; i++) {
6843 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6844 status = ICE_ERR_CFG;
6845 goto err_free_lkup_exts;
6848 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6850 status = ICE_ERR_CFG;
6851 goto err_free_lkup_exts;
6855 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6857 status = ICE_ERR_NO_MEMORY;
6858 goto err_free_lkup_exts;
6861 /* Get field vectors that contain fields extracted from all the protocol
6862 * headers being programmed.
6864 INIT_LIST_HEAD(&rm->fv_list);
6865 INIT_LIST_HEAD(&rm->rg_list);
6867 /* Get bitmap of field vectors (profiles) that are compatible with the
6868 * rule request; only these will be searched in the subsequent call to
6871 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6873 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6877 /* Create any special protocol/offset pairs, such as looking at tunnel
6878 * bits by extracting metadata
6880 status = ice_add_special_words(rinfo, lkup_exts);
6882 goto err_free_lkup_exts;
6884 /* Group match words into recipes using preferred recipe grouping
6887 status = ice_create_recipe_group(hw, rm, lkup_exts);
6891 /* set the recipe priority if specified */
6892 rm->priority = (u8)rinfo->priority;
6894 /* Find offsets from the field vector. Pick the first one for all the
6897 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6901 /* An empty FV list means to use all the profiles returned in the
6904 if (LIST_EMPTY(&rm->fv_list)) {
6907 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
6908 struct ice_sw_fv_list_entry *fvl;
6910 fvl = (struct ice_sw_fv_list_entry *)
6911 ice_malloc(hw, sizeof(*fvl));
6915 fvl->profile_id = j;
6916 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6920 /* get bitmap of all profiles the recipe will be associated with */
6921 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6922 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6924 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6925 ice_set_bit((u16)fvit->profile_id, profiles);
6928 /* Look for a recipe which matches our requested fv / mask list */
6929 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6930 if (*rid < ICE_MAX_NUM_RECIPES)
6931 /* Success if found a recipe that match the existing criteria */
6934 rm->tun_type = rinfo->tun_type;
6935 /* Recipe we need does not exist, add a recipe */
6936 status = ice_add_sw_recipe(hw, rm, profiles);
6940 /* Associate all the recipes created with all the profiles in the
6941 * common field vector.
6943 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6945 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6948 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6949 (u8 *)r_bitmap, NULL);
6953 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6954 ICE_MAX_NUM_RECIPES);
6955 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6959 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6962 ice_release_change_lock(hw);
6967 /* Update profile to recipe bitmap array */
6968 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6969 ICE_MAX_NUM_RECIPES);
6971 /* Update recipe to profile bitmap array */
6972 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
6973 ice_set_bit((u16)fvit->profile_id,
6974 recipe_to_profile[j]);
6977 *rid = rm->root_rid;
6978 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6979 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6981 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6982 ice_recp_grp_entry, l_entry) {
6983 LIST_DEL(&r_entry->l_entry);
6984 ice_free(hw, r_entry);
6987 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6989 LIST_DEL(&fvit->list_entry);
6994 ice_free(hw, rm->root_buf);
6999 ice_free(hw, lkup_exts);
7005 * ice_find_dummy_packet - find dummy packet by tunnel type
7007 * @lkups: lookup elements or match criteria for the advanced recipe, one
7008 * structure per protocol header
7009 * @lkups_cnt: number of protocols
7010 * @tun_type: tunnel type from the match criteria
7011 * @pkt: dummy packet to fill according to filter match criteria
7012 * @pkt_len: packet length of dummy packet
7013 * @offsets: pointer to receive the pointer to the offsets for the packet
7016 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7017 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7019 const struct ice_dummy_pkt_offsets **offsets)
7021 bool tcp = false, udp = false, ipv6 = false, vlan = false;
7025 for (i = 0; i < lkups_cnt; i++) {
7026 if (lkups[i].type == ICE_UDP_ILOS)
7028 else if (lkups[i].type == ICE_TCP_IL)
7030 else if (lkups[i].type == ICE_IPV6_OFOS)
7032 else if (lkups[i].type == ICE_VLAN_OFOS)
7034 else if (lkups[i].type == ICE_IPV4_OFOS &&
7035 lkups[i].h_u.ipv4_hdr.protocol ==
7036 ICE_IPV4_NVGRE_PROTO_ID &&
7037 lkups[i].m_u.ipv4_hdr.protocol ==
7040 else if (lkups[i].type == ICE_PPPOE &&
7041 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7042 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7043 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7046 else if (lkups[i].type == ICE_ETYPE_OL &&
7047 lkups[i].h_u.ethertype.ethtype_id ==
7048 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7049 lkups[i].m_u.ethertype.ethtype_id ==
7052 else if (lkups[i].type == ICE_IPV4_IL &&
7053 lkups[i].h_u.ipv4_hdr.protocol ==
7055 lkups[i].m_u.ipv4_hdr.protocol ==
7060 if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7061 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7062 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7063 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7065 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7066 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7067 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7068 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7070 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7071 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7072 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7073 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7075 } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7076 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7077 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7078 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7080 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7081 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7082 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7083 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7085 } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7086 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7087 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7088 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7092 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7093 *pkt = dummy_ipv4_esp_pkt;
7094 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7095 *offsets = dummy_ipv4_esp_packet_offsets;
7099 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7100 *pkt = dummy_ipv6_esp_pkt;
7101 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7102 *offsets = dummy_ipv6_esp_packet_offsets;
7106 if (tun_type == ICE_SW_TUN_IPV4_AH) {
7107 *pkt = dummy_ipv4_ah_pkt;
7108 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7109 *offsets = dummy_ipv4_ah_packet_offsets;
7113 if (tun_type == ICE_SW_TUN_IPV6_AH) {
7114 *pkt = dummy_ipv6_ah_pkt;
7115 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7116 *offsets = dummy_ipv6_ah_packet_offsets;
7120 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7121 *pkt = dummy_ipv4_nat_pkt;
7122 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7123 *offsets = dummy_ipv4_nat_packet_offsets;
7127 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7128 *pkt = dummy_ipv6_nat_pkt;
7129 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7130 *offsets = dummy_ipv6_nat_packet_offsets;
7134 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7135 *pkt = dummy_ipv4_l2tpv3_pkt;
7136 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7137 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7141 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7142 *pkt = dummy_ipv6_l2tpv3_pkt;
7143 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7144 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7148 if (tun_type == ICE_SW_TUN_GTP) {
7149 *pkt = dummy_udp_gtp_packet;
7150 *pkt_len = sizeof(dummy_udp_gtp_packet);
7151 *offsets = dummy_udp_gtp_packet_offsets;
7155 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7156 *pkt = dummy_pppoe_ipv6_packet;
7157 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7158 *offsets = dummy_pppoe_packet_offsets;
7160 } else if (tun_type == ICE_SW_TUN_PPPOE ||
7161 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7162 *pkt = dummy_pppoe_ipv4_packet;
7163 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7164 *offsets = dummy_pppoe_packet_offsets;
7168 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7169 *pkt = dummy_pppoe_ipv4_packet;
7170 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7171 *offsets = dummy_pppoe_packet_ipv4_offsets;
7175 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7176 *pkt = dummy_pppoe_ipv4_tcp_packet;
7177 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7178 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7182 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7183 *pkt = dummy_pppoe_ipv4_udp_packet;
7184 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7185 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7189 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7190 *pkt = dummy_pppoe_ipv6_packet;
7191 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7192 *offsets = dummy_pppoe_packet_ipv6_offsets;
7196 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7197 *pkt = dummy_pppoe_ipv6_tcp_packet;
7198 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7199 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7203 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7204 *pkt = dummy_pppoe_ipv6_udp_packet;
7205 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7206 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7210 if (tun_type == ICE_SW_IPV4_TCP) {
7211 *pkt = dummy_tcp_packet;
7212 *pkt_len = sizeof(dummy_tcp_packet);
7213 *offsets = dummy_tcp_packet_offsets;
7217 if (tun_type == ICE_SW_IPV4_UDP) {
7218 *pkt = dummy_udp_packet;
7219 *pkt_len = sizeof(dummy_udp_packet);
7220 *offsets = dummy_udp_packet_offsets;
7224 if (tun_type == ICE_SW_IPV6_TCP) {
7225 *pkt = dummy_tcp_ipv6_packet;
7226 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7227 *offsets = dummy_tcp_ipv6_packet_offsets;
7231 if (tun_type == ICE_SW_IPV6_UDP) {
7232 *pkt = dummy_udp_ipv6_packet;
7233 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7234 *offsets = dummy_udp_ipv6_packet_offsets;
7238 if (tun_type == ICE_ALL_TUNNELS) {
7239 *pkt = dummy_gre_udp_packet;
7240 *pkt_len = sizeof(dummy_gre_udp_packet);
7241 *offsets = dummy_gre_udp_packet_offsets;
7245 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7247 *pkt = dummy_gre_tcp_packet;
7248 *pkt_len = sizeof(dummy_gre_tcp_packet);
7249 *offsets = dummy_gre_tcp_packet_offsets;
7253 *pkt = dummy_gre_udp_packet;
7254 *pkt_len = sizeof(dummy_gre_udp_packet);
7255 *offsets = dummy_gre_udp_packet_offsets;
7259 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7260 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7261 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7262 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7264 *pkt = dummy_udp_tun_tcp_packet;
7265 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7266 *offsets = dummy_udp_tun_tcp_packet_offsets;
7270 *pkt = dummy_udp_tun_udp_packet;
7271 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7272 *offsets = dummy_udp_tun_udp_packet_offsets;
7278 *pkt = dummy_vlan_udp_packet;
7279 *pkt_len = sizeof(dummy_vlan_udp_packet);
7280 *offsets = dummy_vlan_udp_packet_offsets;
7283 *pkt = dummy_udp_packet;
7284 *pkt_len = sizeof(dummy_udp_packet);
7285 *offsets = dummy_udp_packet_offsets;
7287 } else if (udp && ipv6) {
7289 *pkt = dummy_vlan_udp_ipv6_packet;
7290 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7291 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7294 *pkt = dummy_udp_ipv6_packet;
7295 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7296 *offsets = dummy_udp_ipv6_packet_offsets;
7298 } else if ((tcp && ipv6) || ipv6) {
7300 *pkt = dummy_vlan_tcp_ipv6_packet;
7301 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7302 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7305 *pkt = dummy_tcp_ipv6_packet;
7306 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7307 *offsets = dummy_tcp_ipv6_packet_offsets;
7312 *pkt = dummy_vlan_tcp_packet;
7313 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7314 *offsets = dummy_vlan_tcp_packet_offsets;
7316 *pkt = dummy_tcp_packet;
7317 *pkt_len = sizeof(dummy_tcp_packet);
7318 *offsets = dummy_tcp_packet_offsets;
7323 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7325 * @lkups: lookup elements or match criteria for the advanced recipe, one
7326 * structure per protocol header
7327 * @lkups_cnt: number of protocols
7328 * @s_rule: stores rule information from the match criteria
7329 * @dummy_pkt: dummy packet to fill according to filter match criteria
7330 * @pkt_len: packet length of dummy packet
7331 * @offsets: offset info for the dummy packet
7333 static enum ice_status
7334 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7335 struct ice_aqc_sw_rules_elem *s_rule,
7336 const u8 *dummy_pkt, u16 pkt_len,
7337 const struct ice_dummy_pkt_offsets *offsets)
7342 /* Start with a packet with a pre-defined/dummy content. Then, fill
7343 * in the header values to be looked up or matched.
7345 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7347 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7349 for (i = 0; i < lkups_cnt; i++) {
7350 enum ice_protocol_type type;
7351 u16 offset = 0, len = 0, j;
7354 /* find the start of this layer; it should be found since this
7355 * was already checked when search for the dummy packet
7357 type = lkups[i].type;
7358 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7359 if (type == offsets[j].type) {
7360 offset = offsets[j].offset;
7365 /* this should never happen in a correct calling sequence */
7367 return ICE_ERR_PARAM;
7369 switch (lkups[i].type) {
7372 len = sizeof(struct ice_ether_hdr);
7375 len = sizeof(struct ice_ethtype_hdr);
7378 len = sizeof(struct ice_vlan_hdr);
7382 len = sizeof(struct ice_ipv4_hdr);
7386 len = sizeof(struct ice_ipv6_hdr);
7391 len = sizeof(struct ice_l4_hdr);
7394 len = sizeof(struct ice_sctp_hdr);
7397 len = sizeof(struct ice_nvgre);
7402 len = sizeof(struct ice_udp_tnl_hdr);
7406 case ICE_GTP_NO_PAY:
7407 len = sizeof(struct ice_udp_gtp_hdr);
7410 len = sizeof(struct ice_pppoe_hdr);
7413 len = sizeof(struct ice_esp_hdr);
7416 len = sizeof(struct ice_nat_t_hdr);
7419 len = sizeof(struct ice_ah_hdr);
7422 len = sizeof(struct ice_l2tpv3_sess_hdr);
7425 return ICE_ERR_PARAM;
7428 /* the length should be a word multiple */
7429 if (len % ICE_BYTES_PER_WORD)
7432 /* We have the offset to the header start, the length, the
7433 * caller's header values and mask. Use this information to
7434 * copy the data into the dummy packet appropriately based on
7435 * the mask. Note that we need to only write the bits as
7436 * indicated by the mask to make sure we don't improperly write
7437 * over any significant packet data.
7439 for (j = 0; j < len / sizeof(u16); j++)
7440 if (((u16 *)&lkups[i].m_u)[j])
7441 ((u16 *)(pkt + offset))[j] =
7442 (((u16 *)(pkt + offset))[j] &
7443 ~((u16 *)&lkups[i].m_u)[j]) |
7444 (((u16 *)&lkups[i].h_u)[j] &
7445 ((u16 *)&lkups[i].m_u)[j]);
7448 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7454 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7455 * @hw: pointer to the hardware structure
7456 * @tun_type: tunnel type
7457 * @pkt: dummy packet to fill in
7458 * @offsets: offset info for the dummy packet
7460 static enum ice_status
7461 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7462 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7467 case ICE_SW_TUN_AND_NON_TUN:
7468 case ICE_SW_TUN_VXLAN_GPE:
7469 case ICE_SW_TUN_VXLAN:
7470 case ICE_SW_TUN_VXLAN_VLAN:
7471 case ICE_SW_TUN_UDP:
7472 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7476 case ICE_SW_TUN_GENEVE:
7477 case ICE_SW_TUN_GENEVE_VLAN:
7478 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7483 /* Nothing needs to be done for this tunnel type */
7487 /* Find the outer UDP protocol header and insert the port number */
7488 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7489 if (offsets[i].type == ICE_UDP_OF) {
7490 struct ice_l4_hdr *hdr;
7493 offset = offsets[i].offset;
7494 hdr = (struct ice_l4_hdr *)&pkt[offset];
7495 hdr->dst_port = CPU_TO_BE16(open_port);
7505 * ice_find_adv_rule_entry - Search a rule entry
7506 * @hw: pointer to the hardware structure
7507 * @lkups: lookup elements or match criteria for the advanced recipe, one
7508 * structure per protocol header
7509 * @lkups_cnt: number of protocols
7510 * @recp_id: recipe ID for which we are finding the rule
7511 * @rinfo: other information regarding the rule e.g. priority and action info
7513 * Helper function to search for a given advance rule entry
7514 * Returns pointer to entry storing the rule if found
7516 static struct ice_adv_fltr_mgmt_list_entry *
7517 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7518 u16 lkups_cnt, u16 recp_id,
7519 struct ice_adv_rule_info *rinfo)
7521 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7522 struct ice_switch_info *sw = hw->switch_info;
7525 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7526 ice_adv_fltr_mgmt_list_entry, list_entry) {
7527 bool lkups_matched = true;
7529 if (lkups_cnt != list_itr->lkups_cnt)
7531 for (i = 0; i < list_itr->lkups_cnt; i++)
7532 if (memcmp(&list_itr->lkups[i], &lkups[i],
7534 lkups_matched = false;
7537 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7538 rinfo->tun_type == list_itr->rule_info.tun_type &&
7546 * ice_adv_add_update_vsi_list
7547 * @hw: pointer to the hardware structure
7548 * @m_entry: pointer to current adv filter management list entry
7549 * @cur_fltr: filter information from the book keeping entry
7550 * @new_fltr: filter information with the new VSI to be added
7552 * Call AQ command to add or update previously created VSI list with new VSI.
7554 * Helper function to do book keeping associated with adding filter information
7555 * The algorithm to do the booking keeping is described below :
7556 * When a VSI needs to subscribe to a given advanced filter
7557 * if only one VSI has been added till now
7558 * Allocate a new VSI list and add two VSIs
7559 * to this list using switch rule command
7560 * Update the previously created switch rule with the
7561 * newly created VSI list ID
7562 * if a VSI list was previously created
7563 * Add the new VSI to the previously created VSI list set
7564 * using the update switch rule command
7566 static enum ice_status
7567 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7568 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7569 struct ice_adv_rule_info *cur_fltr,
7570 struct ice_adv_rule_info *new_fltr)
7572 enum ice_status status;
7573 u16 vsi_list_id = 0;
7575 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7576 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7577 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7578 return ICE_ERR_NOT_IMPL;
7580 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7581 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7582 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7583 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7584 return ICE_ERR_NOT_IMPL;
7586 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7587 /* Only one entry existed in the mapping and it was not already
7588 * a part of a VSI list. So, create a VSI list with the old and
7591 struct ice_fltr_info tmp_fltr;
7592 u16 vsi_handle_arr[2];
7594 /* A rule already exists with the new VSI being added */
7595 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7596 new_fltr->sw_act.fwd_id.hw_vsi_id)
7597 return ICE_ERR_ALREADY_EXISTS;
7599 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7600 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7601 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7607 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7608 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7609 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7610 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7611 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7612 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7614 /* Update the previous switch rule of "forward to VSI" to
7617 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7621 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7622 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7623 m_entry->vsi_list_info =
7624 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7627 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7629 if (!m_entry->vsi_list_info)
7632 /* A rule already exists with the new VSI being added */
7633 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7636 /* Update the previously created VSI list set with
7637 * the new VSI ID passed in
7639 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7641 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7643 ice_aqc_opc_update_sw_rules,
7645 /* update VSI list mapping info with new VSI ID */
7647 ice_set_bit(vsi_handle,
7648 m_entry->vsi_list_info->vsi_map);
7651 m_entry->vsi_count++;
7656 * ice_add_adv_rule - helper function to create an advanced switch rule
7657 * @hw: pointer to the hardware structure
7658 * @lkups: information on the words that needs to be looked up. All words
7659 * together makes one recipe
7660 * @lkups_cnt: num of entries in the lkups array
7661 * @rinfo: other information related to the rule that needs to be programmed
7662 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7663 * ignored is case of error.
7665 * This function can program only 1 rule at a time. The lkups is used to
7666 * describe the all the words that forms the "lookup" portion of the recipe.
7667 * These words can span multiple protocols. Callers to this function need to
7668 * pass in a list of protocol headers with lookup information along and mask
7669 * that determines which words are valid from the given protocol header.
7670 * rinfo describes other information related to this rule such as forwarding
7671 * IDs, priority of this rule, etc.
7674 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7675 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7676 struct ice_rule_query_data *added_entry)
7678 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7679 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7680 const struct ice_dummy_pkt_offsets *pkt_offsets;
7681 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7682 struct LIST_HEAD_TYPE *rule_head;
7683 struct ice_switch_info *sw;
7684 enum ice_status status;
7685 const u8 *pkt = NULL;
7691 /* Initialize profile to result index bitmap */
7692 if (!hw->switch_info->prof_res_bm_init) {
7693 hw->switch_info->prof_res_bm_init = 1;
7694 ice_init_prof_result_bm(hw);
7697 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7698 if (!prof_rule && !lkups_cnt)
7699 return ICE_ERR_PARAM;
7701 /* get # of words we need to match */
7703 for (i = 0; i < lkups_cnt; i++) {
7706 ptr = (u16 *)&lkups[i].m_u;
7707 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7713 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7714 return ICE_ERR_PARAM;
7716 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7717 return ICE_ERR_PARAM;
7720 /* make sure that we can locate a dummy packet */
7721 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7724 status = ICE_ERR_PARAM;
7725 goto err_ice_add_adv_rule;
7728 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7729 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7730 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7731 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7734 vsi_handle = rinfo->sw_act.vsi_handle;
7735 if (!ice_is_vsi_valid(hw, vsi_handle))
7736 return ICE_ERR_PARAM;
7738 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7739 rinfo->sw_act.fwd_id.hw_vsi_id =
7740 ice_get_hw_vsi_num(hw, vsi_handle);
7741 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7742 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7744 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
7747 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7749 /* we have to add VSI to VSI_LIST and increment vsi_count.
7750 * Also Update VSI list so that we can change forwarding rule
7751 * if the rule already exists, we will check if it exists with
7752 * same vsi_id, if not then add it to the VSI list if it already
7753 * exists if not then create a VSI list and add the existing VSI
7754 * ID and the new VSI ID to the list
7755 * We will add that VSI to the list
7757 status = ice_adv_add_update_vsi_list(hw, m_entry,
7758 &m_entry->rule_info,
7761 added_entry->rid = rid;
7762 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7763 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7767 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7768 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7770 return ICE_ERR_NO_MEMORY;
7771 act |= ICE_SINGLE_ACT_LAN_ENABLE;
7772 switch (rinfo->sw_act.fltr_act) {
7773 case ICE_FWD_TO_VSI:
7774 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7775 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7776 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7779 act |= ICE_SINGLE_ACT_TO_Q;
7780 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7781 ICE_SINGLE_ACT_Q_INDEX_M;
7783 case ICE_FWD_TO_QGRP:
7784 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7785 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7786 act |= ICE_SINGLE_ACT_TO_Q;
7787 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7788 ICE_SINGLE_ACT_Q_INDEX_M;
7789 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7790 ICE_SINGLE_ACT_Q_REGION_M;
7792 case ICE_DROP_PACKET:
7793 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7794 ICE_SINGLE_ACT_VALID_BIT;
7797 status = ICE_ERR_CFG;
7798 goto err_ice_add_adv_rule;
7801 /* set the rule LOOKUP type based on caller specified 'RX'
7802 * instead of hardcoding it to be either LOOKUP_TX/RX
7804 * for 'RX' set the source to be the port number
7805 * for 'TX' set the source to be the source HW VSI number (determined
7809 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7810 s_rule->pdata.lkup_tx_rx.src =
7811 CPU_TO_LE16(hw->port_info->lport);
7813 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7814 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7817 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7818 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
7820 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7821 pkt_len, pkt_offsets);
7823 goto err_ice_add_adv_rule;
7825 if (rinfo->tun_type != ICE_NON_TUN &&
7826 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7827 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7828 s_rule->pdata.lkup_tx_rx.hdr,
7831 goto err_ice_add_adv_rule;
7834 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7835 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7838 goto err_ice_add_adv_rule;
7839 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7840 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7842 status = ICE_ERR_NO_MEMORY;
7843 goto err_ice_add_adv_rule;
7846 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7847 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7848 ICE_NONDMA_TO_NONDMA);
7849 if (!adv_fltr->lkups && !prof_rule) {
7850 status = ICE_ERR_NO_MEMORY;
7851 goto err_ice_add_adv_rule;
7854 adv_fltr->lkups_cnt = lkups_cnt;
7855 adv_fltr->rule_info = *rinfo;
7856 adv_fltr->rule_info.fltr_rule_id =
7857 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7858 sw = hw->switch_info;
7859 sw->recp_list[rid].adv_rule = true;
7860 rule_head = &sw->recp_list[rid].filt_rules;
7862 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7863 adv_fltr->vsi_count = 1;
7865 /* Add rule entry to book keeping list */
7866 LIST_ADD(&adv_fltr->list_entry, rule_head);
7868 added_entry->rid = rid;
7869 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7870 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
7872 err_ice_add_adv_rule:
7873 if (status && adv_fltr) {
7874 ice_free(hw, adv_fltr->lkups);
7875 ice_free(hw, adv_fltr);
7878 ice_free(hw, s_rule);
7884 * ice_adv_rem_update_vsi_list
7885 * @hw: pointer to the hardware structure
7886 * @vsi_handle: VSI handle of the VSI to remove
7887 * @fm_list: filter management entry for which the VSI list management needs to
7890 static enum ice_status
7891 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7892 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7894 struct ice_vsi_list_map_info *vsi_list_info;
7895 enum ice_sw_lkup_type lkup_type;
7896 enum ice_status status;
7899 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7900 fm_list->vsi_count == 0)
7901 return ICE_ERR_PARAM;
7903 /* A rule with the VSI being removed does not exist */
7904 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7905 return ICE_ERR_DOES_NOT_EXIST;
7907 lkup_type = ICE_SW_LKUP_LAST;
7908 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7909 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7910 ice_aqc_opc_update_sw_rules,
7915 fm_list->vsi_count--;
7916 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7917 vsi_list_info = fm_list->vsi_list_info;
7918 if (fm_list->vsi_count == 1) {
7919 struct ice_fltr_info tmp_fltr;
7922 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7924 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7925 return ICE_ERR_OUT_OF_RANGE;
7927 /* Make sure VSI list is empty before removing it below */
7928 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7930 ice_aqc_opc_update_sw_rules,
7935 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7936 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7937 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7938 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7939 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7940 tmp_fltr.fwd_id.hw_vsi_id =
7941 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7942 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7943 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7944 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7946 /* Update the previous switch rule of "MAC forward to VSI" to
7947 * "MAC fwd to VSI list"
7949 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7951 ice_debug(hw, ICE_DBG_SW,
7952 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7953 tmp_fltr.fwd_id.hw_vsi_id, status);
7956 fm_list->vsi_list_info->ref_cnt--;
7958 /* Remove the VSI list since it is no longer used */
7959 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7961 ice_debug(hw, ICE_DBG_SW,
7962 "Failed to remove VSI list %d, error %d\n",
7963 vsi_list_id, status);
7967 LIST_DEL(&vsi_list_info->list_entry);
7968 ice_free(hw, vsi_list_info);
7969 fm_list->vsi_list_info = NULL;
7976 * ice_rem_adv_rule - removes existing advanced switch rule
7977 * @hw: pointer to the hardware structure
7978 * @lkups: information on the words that needs to be looked up. All words
7979 * together makes one recipe
7980 * @lkups_cnt: num of entries in the lkups array
7981 * @rinfo: Its the pointer to the rule information for the rule
7983 * This function can be used to remove 1 rule at a time. The lkups is
7984 * used to describe all the words that forms the "lookup" portion of the
7985 * rule. These words can span multiple protocols. Callers to this function
7986 * need to pass in a list of protocol headers with lookup information along
7987 * and mask that determines which words are valid from the given protocol
7988 * header. rinfo describes other information related to this rule such as
7989 * forwarding IDs, priority of this rule, etc.
7992 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7993 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7995 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7996 struct ice_prot_lkup_ext lkup_exts;
7997 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7998 enum ice_status status = ICE_SUCCESS;
7999 bool remove_rule = false;
8000 u16 i, rid, vsi_handle;
8002 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8003 for (i = 0; i < lkups_cnt; i++) {
8006 if (lkups[i].type >= ICE_PROTOCOL_LAST)
8009 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8014 /* Create any special protocol/offset pairs, such as looking at tunnel
8015 * bits by extracting metadata
8017 status = ice_add_special_words(rinfo, &lkup_exts);
8021 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8022 /* If did not find a recipe that match the existing criteria */
8023 if (rid == ICE_MAX_NUM_RECIPES)
8024 return ICE_ERR_PARAM;
8026 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8027 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8028 /* the rule is already removed */
8031 ice_acquire_lock(rule_lock);
8032 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8034 } else if (list_elem->vsi_count > 1) {
8035 remove_rule = false;
8036 vsi_handle = rinfo->sw_act.vsi_handle;
8037 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8039 vsi_handle = rinfo->sw_act.vsi_handle;
8040 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8042 ice_release_lock(rule_lock);
8045 if (list_elem->vsi_count == 0)
8048 ice_release_lock(rule_lock);
8050 struct ice_aqc_sw_rules_elem *s_rule;
8053 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8055 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
8058 return ICE_ERR_NO_MEMORY;
8059 s_rule->pdata.lkup_tx_rx.act = 0;
8060 s_rule->pdata.lkup_tx_rx.index =
8061 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8062 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8063 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8065 ice_aqc_opc_remove_sw_rules, NULL);
8066 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8067 struct ice_switch_info *sw = hw->switch_info;
8069 ice_acquire_lock(rule_lock);
8070 LIST_DEL(&list_elem->list_entry);
8071 ice_free(hw, list_elem->lkups);
8072 ice_free(hw, list_elem);
8073 ice_release_lock(rule_lock);
8074 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8075 sw->recp_list[rid].adv_rule = false;
8077 ice_free(hw, s_rule);
8083 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8084 * @hw: pointer to the hardware structure
8085 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8087 * This function is used to remove 1 rule at a time. The removal is based on
8088 * the remove_entry parameter. This function will remove rule for a given
8089 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8092 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8093 struct ice_rule_query_data *remove_entry)
8095 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8096 struct LIST_HEAD_TYPE *list_head;
8097 struct ice_adv_rule_info rinfo;
8098 struct ice_switch_info *sw;
8100 sw = hw->switch_info;
8101 if (!sw->recp_list[remove_entry->rid].recp_created)
8102 return ICE_ERR_PARAM;
8103 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8104 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8106 if (list_itr->rule_info.fltr_rule_id ==
8107 remove_entry->rule_id) {
8108 rinfo = list_itr->rule_info;
8109 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8110 return ice_rem_adv_rule(hw, list_itr->lkups,
8111 list_itr->lkups_cnt, &rinfo);
8114 /* either list is empty or unable to find rule */
8115 return ICE_ERR_DOES_NOT_EXIST;
8119 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8121 * @hw: pointer to the hardware structure
8122 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8124 * This function is used to remove all the rules for a given VSI and as soon
8125 * as removing a rule fails, it will return immediately with the error code,
8126 * else it will return ICE_SUCCESS
8128 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8130 struct ice_adv_fltr_mgmt_list_entry *list_itr;
8131 struct ice_vsi_list_map_info *map_info;
8132 struct LIST_HEAD_TYPE *list_head;
8133 struct ice_adv_rule_info rinfo;
8134 struct ice_switch_info *sw;
8135 enum ice_status status;
8136 u16 vsi_list_id = 0;
8139 sw = hw->switch_info;
8140 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8141 if (!sw->recp_list[rid].recp_created)
8143 if (!sw->recp_list[rid].adv_rule)
8145 list_head = &sw->recp_list[rid].filt_rules;
8147 LIST_FOR_EACH_ENTRY(list_itr, list_head,
8148 ice_adv_fltr_mgmt_list_entry, list_entry) {
8149 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
8154 rinfo = list_itr->rule_info;
8155 rinfo.sw_act.vsi_handle = vsi_handle;
8156 status = ice_rem_adv_rule(hw, list_itr->lkups,
8157 list_itr->lkups_cnt, &rinfo);
8167 * ice_replay_fltr - Replay all the filters stored by a specific list head
8168 * @hw: pointer to the hardware structure
8169 * @list_head: list for which filters needs to be replayed
8170 * @recp_id: Recipe ID for which rules need to be replayed
8172 static enum ice_status
8173 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8175 struct ice_fltr_mgmt_list_entry *itr;
8176 enum ice_status status = ICE_SUCCESS;
8177 struct ice_sw_recipe *recp_list;
8178 u8 lport = hw->port_info->lport;
8179 struct LIST_HEAD_TYPE l_head;
8181 if (LIST_EMPTY(list_head))
8184 recp_list = &hw->switch_info->recp_list[recp_id];
8185 /* Move entries from the given list_head to a temporary l_head so that
8186 * they can be replayed. Otherwise when trying to re-add the same
8187 * filter, the function will return already exists
8189 LIST_REPLACE_INIT(list_head, &l_head);
8191 /* Mark the given list_head empty by reinitializing it so filters
8192 * could be added again by *handler
8194 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8196 struct ice_fltr_list_entry f_entry;
8199 f_entry.fltr_info = itr->fltr_info;
8200 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8201 status = ice_add_rule_internal(hw, recp_list, lport,
8203 if (status != ICE_SUCCESS)
8208 /* Add a filter per VSI separately */
8209 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8211 if (!ice_is_vsi_valid(hw, vsi_handle))
8214 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8215 f_entry.fltr_info.vsi_handle = vsi_handle;
8216 f_entry.fltr_info.fwd_id.hw_vsi_id =
8217 ice_get_hw_vsi_num(hw, vsi_handle);
8218 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8219 if (recp_id == ICE_SW_LKUP_VLAN)
8220 status = ice_add_vlan_internal(hw, recp_list,
8223 status = ice_add_rule_internal(hw, recp_list,
8226 if (status != ICE_SUCCESS)
8231 /* Clear the filter management list */
8232 ice_rem_sw_rule_info(hw, &l_head);
8237 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8238 * @hw: pointer to the hardware structure
8240 * NOTE: This function does not clean up partially added filters on error.
8241 * It is up to caller of the function to issue a reset or fail early.
8243 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8245 struct ice_switch_info *sw = hw->switch_info;
8246 enum ice_status status = ICE_SUCCESS;
8249 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8250 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8252 status = ice_replay_fltr(hw, i, head);
8253 if (status != ICE_SUCCESS)
8260 * ice_replay_vsi_fltr - Replay filters for requested VSI
8261 * @hw: pointer to the hardware structure
8262 * @pi: pointer to port information structure
8263 * @sw: pointer to switch info struct for which function replays filters
8264 * @vsi_handle: driver VSI handle
8265 * @recp_id: Recipe ID for which rules need to be replayed
8266 * @list_head: list for which filters need to be replayed
8268 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8269 * It is required to pass valid VSI handle.
8271 static enum ice_status
8272 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8273 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8274 struct LIST_HEAD_TYPE *list_head)
8276 struct ice_fltr_mgmt_list_entry *itr;
8277 enum ice_status status = ICE_SUCCESS;
8278 struct ice_sw_recipe *recp_list;
8281 if (LIST_EMPTY(list_head))
8283 recp_list = &sw->recp_list[recp_id];
8284 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8286 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8288 struct ice_fltr_list_entry f_entry;
8290 f_entry.fltr_info = itr->fltr_info;
8291 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8292 itr->fltr_info.vsi_handle == vsi_handle) {
8293 /* update the src in case it is VSI num */
8294 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8295 f_entry.fltr_info.src = hw_vsi_id;
8296 status = ice_add_rule_internal(hw, recp_list,
8299 if (status != ICE_SUCCESS)
8303 if (!itr->vsi_list_info ||
8304 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8306 /* Clearing it so that the logic can add it back */
8307 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8308 f_entry.fltr_info.vsi_handle = vsi_handle;
8309 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8310 /* update the src in case it is VSI num */
8311 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8312 f_entry.fltr_info.src = hw_vsi_id;
8313 if (recp_id == ICE_SW_LKUP_VLAN)
8314 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8316 status = ice_add_rule_internal(hw, recp_list,
8319 if (status != ICE_SUCCESS)
8327 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8328 * @hw: pointer to the hardware structure
8329 * @vsi_handle: driver VSI handle
8330 * @list_head: list for which filters need to be replayed
8332 * Replay the advanced rule for the given VSI.
8334 static enum ice_status
8335 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8336 struct LIST_HEAD_TYPE *list_head)
8338 struct ice_rule_query_data added_entry = { 0 };
8339 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8340 enum ice_status status = ICE_SUCCESS;
8342 if (LIST_EMPTY(list_head))
8344 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8346 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8347 u16 lk_cnt = adv_fltr->lkups_cnt;
8349 if (vsi_handle != rinfo->sw_act.vsi_handle)
8351 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8360 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8361 * @hw: pointer to the hardware structure
8362 * @pi: pointer to port information structure
8363 * @vsi_handle: driver VSI handle
8365 * Replays filters for requested VSI via vsi_handle.
8368 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8371 struct ice_switch_info *sw = hw->switch_info;
8372 enum ice_status status;
8375 /* Update the recipes that were created */
8376 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8377 struct LIST_HEAD_TYPE *head;
8379 head = &sw->recp_list[i].filt_replay_rules;
8380 if (!sw->recp_list[i].adv_rule)
8381 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8384 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8385 if (status != ICE_SUCCESS)
8393 * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8394 * @hw: pointer to the HW struct
8395 * @sw: pointer to switch info struct for which function removes filters
8397 * Deletes the filter replay rules for given switch
8399 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8406 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8407 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8408 struct LIST_HEAD_TYPE *l_head;
8410 l_head = &sw->recp_list[i].filt_replay_rules;
8411 if (!sw->recp_list[i].adv_rule)
8412 ice_rem_sw_rule_info(hw, l_head);
8414 ice_rem_adv_rule_info(hw, l_head);
8420 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8421 * @hw: pointer to the HW struct
8423 * Deletes the filter replay rules.
8425 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8427 ice_rm_sw_replay_rule_info(hw, hw->switch_info);