1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
16 #define ICE_TCP_PROTO_ID 0x06
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19 * struct to configure any switch filter rules.
20 * {DA (6 bytes), SA(6 bytes),
21 * Ether type (2 bytes for header without VLAN tag) OR
22 * VLAN tag (4 bytes for header with VLAN tag) }
24 * Word on Hardcoded values
25 * byte 0 = 0x2: to identify it as locally administered DA MAC
26 * byte 6 = 0x2: to identify it as locally administered SA MAC
27 * byte 12 = 0x81 & byte 13 = 0x00:
28 * In case of VLAN filter first two bytes defines ether type (0x8100)
29 * and remaining two bytes are placeholder for programming a given VLAN ID
30 * In case of Ether type filter it is treated as header without VLAN tag
31 * and byte 12 and 13 is used to program a given Ether type instead
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 struct ice_dummy_pkt_offsets {
38 enum ice_protocol_type type;
39 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
45 { ICE_IPV4_OFOS, 14 },
50 { ICE_PROTOCOL_LAST, 0 },
53 static const u8 dummy_gre_tcp_packet[] = {
54 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00,
58 0x08, 0x00, /* ICE_ETYPE_OL 12 */
60 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x2F, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00,
66 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67 0x00, 0x00, 0x00, 0x00,
69 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00,
74 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x06, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
83 0x50, 0x02, 0x20, 0x00,
84 0x00, 0x00, 0x00, 0x00
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
90 { ICE_IPV4_OFOS, 14 },
95 { ICE_PROTOCOL_LAST, 0 },
98 static const u8 dummy_gre_udp_packet[] = {
99 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
103 0x08, 0x00, /* ICE_ETYPE_OL 12 */
105 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106 0x00, 0x00, 0x00, 0x00,
107 0x00, 0x2F, 0x00, 0x00,
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115 0x00, 0x00, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
119 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x11, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126 0x00, 0x08, 0x00, 0x00,
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
131 { ICE_ETYPE_OL, 12 },
132 { ICE_IPV4_OFOS, 14 },
136 { ICE_VXLAN_GPE, 42 },
140 { ICE_PROTOCOL_LAST, 0 },
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
145 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00,
148 0x08, 0x00, /* ICE_ETYPE_OL 12 */
150 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 0x00, 0x01, 0x00, 0x00,
152 0x40, 0x11, 0x00, 0x00,
153 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 0x00, 0x46, 0x00, 0x00,
159 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
167 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 0x00, 0x01, 0x00, 0x00,
169 0x40, 0x06, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
176 0x50, 0x02, 0x20, 0x00,
177 0x00, 0x00, 0x00, 0x00
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
182 { ICE_ETYPE_OL, 12 },
183 { ICE_IPV4_OFOS, 14 },
187 { ICE_VXLAN_GPE, 42 },
190 { ICE_UDP_ILOS, 84 },
191 { ICE_PROTOCOL_LAST, 0 },
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
196 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00,
199 0x08, 0x00, /* ICE_ETYPE_OL 12 */
201 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 0x00, 0x01, 0x00, 0x00,
203 0x00, 0x11, 0x00, 0x00,
204 0x00, 0x00, 0x00, 0x00,
205 0x00, 0x00, 0x00, 0x00,
207 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 0x00, 0x3a, 0x00, 0x00,
210 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
218 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 0x00, 0x01, 0x00, 0x00,
220 0x00, 0x11, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 0x00, 0x08, 0x00, 0x00,
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
231 { ICE_ETYPE_OL, 12 },
232 { ICE_IPV4_OFOS, 14 },
233 { ICE_UDP_ILOS, 34 },
234 { ICE_PROTOCOL_LAST, 0 },
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x08, 0x00, /* ICE_ETYPE_OL 12 */
245 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 0x00, 0x01, 0x00, 0x00,
247 0x00, 0x11, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 0x00, 0x08, 0x00, 0x00,
254 0x00, 0x00, /* 2 bytes for 4 byte alignment */
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
260 { ICE_ETYPE_OL, 12 },
261 { ICE_VLAN_OFOS, 14 },
262 { ICE_IPV4_OFOS, 18 },
263 { ICE_UDP_ILOS, 38 },
264 { ICE_PROTOCOL_LAST, 0 },
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00,
273 0x81, 0x00, /* ICE_ETYPE_OL 12 */
275 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
277 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 0x00, 0x01, 0x00, 0x00,
279 0x00, 0x11, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
283 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 0x00, 0x08, 0x00, 0x00,
286 0x00, 0x00, /* 2 bytes for 4 byte alignment */
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
292 { ICE_ETYPE_OL, 12 },
293 { ICE_IPV4_OFOS, 14 },
295 { ICE_PROTOCOL_LAST, 0 },
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x08, 0x00, /* ICE_ETYPE_OL 12 */
306 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 0x00, 0x01, 0x00, 0x00,
308 0x00, 0x06, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00,
315 0x50, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, /* 2 bytes for 4 byte alignment */
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
324 { ICE_ETYPE_OL, 12 },
325 { ICE_VLAN_OFOS, 14 },
326 { ICE_IPV4_OFOS, 18 },
328 { ICE_PROTOCOL_LAST, 0 },
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x81, 0x00, /* ICE_ETYPE_OL 12 */
339 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
341 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 0x00, 0x01, 0x00, 0x00,
343 0x00, 0x06, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00,
345 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00,
350 0x50, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, /* 2 bytes for 4 byte alignment */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
358 { ICE_ETYPE_OL, 12 },
359 { ICE_IPV6_OFOS, 14 },
361 { ICE_PROTOCOL_LAST, 0 },
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
369 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
371 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00,
385 0x50, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, /* 2 bytes for 4 byte alignment */
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
395 { ICE_ETYPE_OL, 12 },
396 { ICE_VLAN_OFOS, 14 },
397 { ICE_IPV6_OFOS, 18 },
399 { ICE_PROTOCOL_LAST, 0 },
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x81, 0x00, /* ICE_ETYPE_OL 12 */
410 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
412 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00,
419 0x00, 0x00, 0x00, 0x00,
420 0x00, 0x00, 0x00, 0x00,
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
426 0x50, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x00, 0x00, /* 2 bytes for 4 byte alignment */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
435 { ICE_ETYPE_OL, 12 },
436 { ICE_IPV6_OFOS, 14 },
437 { ICE_UDP_ILOS, 54 },
438 { ICE_PROTOCOL_LAST, 0 },
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
449 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x00, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
455 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00,
457 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00,
460 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 0x00, 0x10, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, /* 2 bytes for 4 byte alignment */
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
473 { ICE_ETYPE_OL, 12 },
474 { ICE_VLAN_OFOS, 14 },
475 { ICE_IPV6_OFOS, 18 },
476 { ICE_UDP_ILOS, 58 },
477 { ICE_PROTOCOL_LAST, 0 },
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 0x00, 0x00, 0x00, 0x00,
484 0x00, 0x00, 0x00, 0x00,
486 0x81, 0x00, /* ICE_ETYPE_OL 12 */
488 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
490 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 0x00, 0x08, 0x00, 0x00,
504 0x00, 0x00, /* 2 bytes for 4 byte alignment */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
509 { ICE_IPV4_OFOS, 14 },
512 { ICE_PROTOCOL_LAST, 0 },
515 static const u8 dummy_udp_gtp_packet[] = {
516 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
521 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x11, 0x00, 0x00,
524 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00,
527 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 0x00, 0x1c, 0x00, 0x00,
530 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 0x00, 0x00, 0x00, 0x00,
532 0x00, 0x00, 0x00, 0x85,
534 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 0x00, 0x00, 0x00, 0x00,
538 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
540 { ICE_ETYPE_OL, 12 },
541 { ICE_VLAN_OFOS, 14},
543 { ICE_PROTOCOL_LAST, 0 },
546 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
548 { ICE_ETYPE_OL, 12 },
549 { ICE_VLAN_OFOS, 14},
551 { ICE_IPV4_OFOS, 26 },
552 { ICE_PROTOCOL_LAST, 0 },
555 static const u8 dummy_pppoe_ipv4_packet[] = {
556 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
557 0x00, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x81, 0x00, /* ICE_ETYPE_OL 12 */
562 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
564 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
567 0x00, 0x21, /* PPP Link Layer 24 */
569 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
570 0x00, 0x00, 0x00, 0x00,
571 0x00, 0x00, 0x00, 0x00,
572 0x00, 0x00, 0x00, 0x00,
573 0x00, 0x00, 0x00, 0x00,
575 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
579 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
581 { ICE_ETYPE_OL, 12 },
582 { ICE_VLAN_OFOS, 14},
584 { ICE_IPV4_OFOS, 26 },
586 { ICE_PROTOCOL_LAST, 0 },
589 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
590 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
591 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00,
594 0x81, 0x00, /* ICE_ETYPE_OL 12 */
596 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
598 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
601 0x00, 0x21, /* PPP Link Layer 24 */
603 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
604 0x00, 0x01, 0x00, 0x00,
605 0x00, 0x06, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
607 0x00, 0x00, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
610 0x00, 0x00, 0x00, 0x00,
611 0x00, 0x00, 0x00, 0x00,
612 0x50, 0x00, 0x00, 0x00,
613 0x00, 0x00, 0x00, 0x00,
615 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
619 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
621 { ICE_ETYPE_OL, 12 },
622 { ICE_VLAN_OFOS, 14},
624 { ICE_IPV4_OFOS, 26 },
625 { ICE_UDP_ILOS, 46 },
626 { ICE_PROTOCOL_LAST, 0 },
629 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
630 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
631 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, 0x00, 0x00,
634 0x81, 0x00, /* ICE_ETYPE_OL 12 */
636 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
638 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
641 0x00, 0x21, /* PPP Link Layer 24 */
643 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
644 0x00, 0x01, 0x00, 0x00,
645 0x00, 0x11, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
650 0x00, 0x08, 0x00, 0x00,
652 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
655 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
657 { ICE_ETYPE_OL, 12 },
658 { ICE_VLAN_OFOS, 14},
660 { ICE_IPV6_OFOS, 26 },
661 { ICE_PROTOCOL_LAST, 0 },
664 static const u8 dummy_pppoe_ipv6_packet[] = {
665 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
666 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00,
669 0x81, 0x00, /* ICE_ETYPE_OL 12 */
671 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
673 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
676 0x00, 0x57, /* PPP Link Layer 24 */
678 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
679 0x00, 0x00, 0x3b, 0x00,
680 0x00, 0x00, 0x00, 0x00,
681 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00,
683 0x00, 0x00, 0x00, 0x00,
684 0x00, 0x00, 0x00, 0x00,
685 0x00, 0x00, 0x00, 0x00,
686 0x00, 0x00, 0x00, 0x00,
687 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
693 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
695 { ICE_ETYPE_OL, 12 },
696 { ICE_VLAN_OFOS, 14},
698 { ICE_IPV6_OFOS, 26 },
700 { ICE_PROTOCOL_LAST, 0 },
703 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
704 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
705 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x00, 0x00,
708 0x81, 0x00, /* ICE_ETYPE_OL 12 */
710 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
712 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
715 0x00, 0x57, /* PPP Link Layer 24 */
717 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
718 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
721 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00,
723 0x00, 0x00, 0x00, 0x00,
724 0x00, 0x00, 0x00, 0x00,
725 0x00, 0x00, 0x00, 0x00,
726 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
729 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x00, 0x00,
731 0x50, 0x00, 0x00, 0x00,
732 0x00, 0x00, 0x00, 0x00,
734 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
738 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
740 { ICE_ETYPE_OL, 12 },
741 { ICE_VLAN_OFOS, 14},
743 { ICE_IPV6_OFOS, 26 },
744 { ICE_UDP_ILOS, 66 },
745 { ICE_PROTOCOL_LAST, 0 },
748 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
749 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
750 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x00, 0x00,
753 0x81, 0x00, /* ICE_ETYPE_OL 12 */
755 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
757 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
760 0x00, 0x57, /* PPP Link Layer 24 */
762 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
763 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
774 0x00, 0x08, 0x00, 0x00,
776 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
779 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
781 { ICE_IPV4_OFOS, 14 },
783 { ICE_PROTOCOL_LAST, 0 },
786 static const u8 dummy_ipv4_esp_pkt[] = {
787 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
788 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00,
792 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
793 0x00, 0x00, 0x40, 0x00,
794 0x40, 0x32, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00,
796 0x00, 0x00, 0x00, 0x00,
798 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
799 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
803 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
805 { ICE_IPV6_OFOS, 14 },
807 { ICE_PROTOCOL_LAST, 0 },
810 static const u8 dummy_ipv6_esp_pkt[] = {
811 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
812 0x00, 0x00, 0x00, 0x00,
813 0x00, 0x00, 0x00, 0x00,
816 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
817 0x00, 0x08, 0x32, 0x00, /* Next header ESP */
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
823 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00,
825 0x00, 0x00, 0x00, 0x00,
827 0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
828 0x00, 0x00, 0x00, 0x00,
829 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
832 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
834 { ICE_IPV4_OFOS, 14 },
836 { ICE_PROTOCOL_LAST, 0 },
839 static const u8 dummy_ipv4_ah_pkt[] = {
840 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
841 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00,
845 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
846 0x00, 0x00, 0x40, 0x00,
847 0x40, 0x33, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
857 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
859 { ICE_IPV6_OFOS, 14 },
861 { ICE_PROTOCOL_LAST, 0 },
864 static const u8 dummy_ipv6_ah_pkt[] = {
865 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
866 0x00, 0x00, 0x00, 0x00,
867 0x00, 0x00, 0x00, 0x00,
870 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
871 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
872 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x00, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
878 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, 0x00, 0x00,
881 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
882 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00,
884 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
887 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
889 { ICE_IPV4_OFOS, 14 },
890 { ICE_UDP_ILOS, 34 },
892 { ICE_PROTOCOL_LAST, 0 },
895 static const u8 dummy_ipv4_nat_pkt[] = {
896 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
897 0x00, 0x00, 0x00, 0x00,
898 0x00, 0x00, 0x00, 0x00,
901 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
902 0x00, 0x00, 0x40, 0x00,
903 0x40, 0x11, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
908 0x00, 0x00, 0x00, 0x00,
910 0x00, 0x00, 0x00, 0x00,
911 0x00, 0x00, 0x00, 0x00,
912 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
915 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
917 { ICE_IPV6_OFOS, 14 },
918 { ICE_UDP_ILOS, 54 },
920 { ICE_PROTOCOL_LAST, 0 },
923 static const u8 dummy_ipv6_nat_pkt[] = {
924 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
925 0x00, 0x00, 0x00, 0x00,
926 0x00, 0x00, 0x00, 0x00,
929 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
930 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
931 0x00, 0x00, 0x00, 0x00,
932 0x00, 0x00, 0x00, 0x00,
933 0x00, 0x00, 0x00, 0x00,
934 0x00, 0x00, 0x00, 0x00,
935 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00,
937 0x00, 0x00, 0x00, 0x00,
938 0x00, 0x00, 0x00, 0x00,
940 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
941 0x00, 0x00, 0x00, 0x00,
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, 0x00, 0x00,
945 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
949 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
951 { ICE_IPV4_OFOS, 14 },
953 { ICE_PROTOCOL_LAST, 0 },
956 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
957 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
958 0x00, 0x00, 0x00, 0x00,
959 0x00, 0x00, 0x00, 0x00,
962 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
963 0x00, 0x00, 0x40, 0x00,
964 0x40, 0x73, 0x00, 0x00,
965 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
969 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
974 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
976 { ICE_IPV6_OFOS, 14 },
978 { ICE_PROTOCOL_LAST, 0 },
981 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
982 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
983 0x00, 0x00, 0x00, 0x00,
984 0x00, 0x00, 0x00, 0x00,
987 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
988 0x00, 0x0c, 0x73, 0x40,
989 0x00, 0x00, 0x00, 0x00,
990 0x00, 0x00, 0x00, 0x00,
991 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00,
993 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00,
995 0x00, 0x00, 0x00, 0x00,
996 0x00, 0x00, 0x00, 0x00,
998 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
999 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1004 /* this is a recipe to profile association bitmap */
1005 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1006 ICE_MAX_NUM_PROFILES);
1008 /* this is a profile to recipe association bitmap */
1009 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1010 ICE_MAX_NUM_RECIPES);
1012 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1015 * ice_collect_result_idx - copy result index values
1016 * @buf: buffer that contains the result index
1017 * @recp: the recipe struct to copy data into
1019 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1020 struct ice_sw_recipe *recp)
1022 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1023 ice_set_bit(buf->content.result_indx &
1024 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1028 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1029 * @rid: recipe ID that we are populating
1031 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1033 u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1034 u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1035 u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1036 u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1037 enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1038 u16 i, j, profile_num = 0;
1039 bool non_tun_valid = false;
1040 bool pppoe_valid = false;
1041 bool vxlan_valid = false;
1042 bool gre_valid = false;
1043 bool gtp_valid = false;
1044 bool flag_valid = false;
1046 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1047 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1052 for (i = 0; i < 12; i++) {
1053 if (gre_profile[i] == j)
1057 for (i = 0; i < 12; i++) {
1058 if (vxlan_profile[i] == j)
1062 for (i = 0; i < 7; i++) {
1063 if (pppoe_profile[i] == j)
1067 for (i = 0; i < 6; i++) {
1068 if (non_tun_profile[i] == j)
1069 non_tun_valid = true;
1072 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1073 j <= ICE_PROFID_IPV6_GTPU_IPV6_OTHER)
1076 if (j >= ICE_PROFID_IPV4_ESP &&
1077 j <= ICE_PROFID_IPV6_PFCP_SESSION)
1081 if (!non_tun_valid && vxlan_valid)
1082 tun_type = ICE_SW_TUN_VXLAN;
1083 else if (!non_tun_valid && gre_valid)
1084 tun_type = ICE_SW_TUN_NVGRE;
1085 else if (!non_tun_valid && pppoe_valid)
1086 tun_type = ICE_SW_TUN_PPPOE;
1087 else if (!non_tun_valid && gtp_valid)
1088 tun_type = ICE_SW_TUN_GTP;
1089 else if (non_tun_valid &&
1090 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1091 tun_type = ICE_SW_TUN_AND_NON_TUN;
1092 else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1094 tun_type = ICE_NON_TUN;
1096 if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1097 i = ice_is_bit_set(recipe_to_profile[rid],
1098 ICE_PROFID_PPPOE_IPV4_OTHER);
1099 j = ice_is_bit_set(recipe_to_profile[rid],
1100 ICE_PROFID_PPPOE_IPV6_OTHER);
1102 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1104 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1107 if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1108 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1109 if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1111 case ICE_PROFID_IPV4_TCP:
1112 tun_type = ICE_SW_IPV4_TCP;
1114 case ICE_PROFID_IPV4_UDP:
1115 tun_type = ICE_SW_IPV4_UDP;
1117 case ICE_PROFID_IPV6_TCP:
1118 tun_type = ICE_SW_IPV6_TCP;
1120 case ICE_PROFID_IPV6_UDP:
1121 tun_type = ICE_SW_IPV6_UDP;
1123 case ICE_PROFID_PPPOE_PAY:
1124 tun_type = ICE_SW_TUN_PPPOE_PAY;
1126 case ICE_PROFID_PPPOE_IPV4_TCP:
1127 tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1129 case ICE_PROFID_PPPOE_IPV4_UDP:
1130 tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1132 case ICE_PROFID_PPPOE_IPV4_OTHER:
1133 tun_type = ICE_SW_TUN_PPPOE_IPV4;
1135 case ICE_PROFID_PPPOE_IPV6_TCP:
1136 tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1138 case ICE_PROFID_PPPOE_IPV6_UDP:
1139 tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1141 case ICE_PROFID_PPPOE_IPV6_OTHER:
1142 tun_type = ICE_SW_TUN_PPPOE_IPV6;
1144 case ICE_PROFID_IPV4_ESP:
1145 tun_type = ICE_SW_TUN_IPV4_ESP;
1147 case ICE_PROFID_IPV6_ESP:
1148 tun_type = ICE_SW_TUN_IPV6_ESP;
1150 case ICE_PROFID_IPV4_AH:
1151 tun_type = ICE_SW_TUN_IPV4_AH;
1153 case ICE_PROFID_IPV6_AH:
1154 tun_type = ICE_SW_TUN_IPV6_AH;
1156 case ICE_PROFID_IPV4_NAT_T:
1157 tun_type = ICE_SW_TUN_IPV4_NAT_T;
1159 case ICE_PROFID_IPV6_NAT_T:
1160 tun_type = ICE_SW_TUN_IPV6_NAT_T;
1162 case ICE_PROFID_IPV4_PFCP_NODE:
1164 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1166 case ICE_PROFID_IPV6_PFCP_NODE:
1168 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1170 case ICE_PROFID_IPV4_PFCP_SESSION:
1172 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1174 case ICE_PROFID_IPV6_PFCP_SESSION:
1176 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1178 case ICE_PROFID_MAC_IPV4_L2TPV3:
1179 tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1181 case ICE_PROFID_MAC_IPV6_L2TPV3:
1182 tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1197 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1198 * @hw: pointer to hardware structure
1199 * @recps: struct that we need to populate
1200 * @rid: recipe ID that we are populating
1201 * @refresh_required: true if we should get recipe to profile mapping from FW
1203 * This function is used to populate all the necessary entries into our
1204 * bookkeeping so that we have a current list of all the recipes that are
1205 * programmed in the firmware.
1207 static enum ice_status
1208 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1209 bool *refresh_required)
1211 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1212 struct ice_aqc_recipe_data_elem *tmp;
1213 u16 num_recps = ICE_MAX_NUM_RECIPES;
1214 struct ice_prot_lkup_ext *lkup_exts;
1215 enum ice_status status;
1219 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1221 /* we need a buffer big enough to accommodate all the recipes */
1222 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1223 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1225 return ICE_ERR_NO_MEMORY;
1227 tmp[0].recipe_indx = rid;
1228 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1229 /* non-zero status meaning recipe doesn't exist */
1233 /* Get recipe to profile map so that we can get the fv from lkups that
1234 * we read for a recipe from FW. Since we want to minimize the number of
1235 * times we make this FW call, just make one call and cache the copy
1236 * until a new recipe is added. This operation is only required the
1237 * first time to get the changes from FW. Then to search existing
1238 * entries we don't need to update the cache again until another recipe
1241 if (*refresh_required) {
1242 ice_get_recp_to_prof_map(hw);
1243 *refresh_required = false;
1246 /* Start populating all the entries for recps[rid] based on lkups from
1247 * firmware. Note that we are only creating the root recipe in our
1250 lkup_exts = &recps[rid].lkup_exts;
1252 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1253 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1254 struct ice_recp_grp_entry *rg_entry;
1255 u8 i, prof, idx, prot = 0;
1259 rg_entry = (struct ice_recp_grp_entry *)
1260 ice_malloc(hw, sizeof(*rg_entry));
1262 status = ICE_ERR_NO_MEMORY;
1266 idx = root_bufs.recipe_indx;
1267 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1269 /* Mark all result indices in this chain */
1270 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1271 ice_set_bit(root_bufs.content.result_indx &
1272 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1274 /* get the first profile that is associated with rid */
1275 prof = ice_find_first_bit(recipe_to_profile[idx],
1276 ICE_MAX_NUM_PROFILES);
1277 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1278 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1280 rg_entry->fv_idx[i] = lkup_indx;
1281 rg_entry->fv_mask[i] =
1282 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1284 /* If the recipe is a chained recipe then all its
1285 * child recipe's result will have a result index.
1286 * To fill fv_words we should not use those result
1287 * index, we only need the protocol ids and offsets.
1288 * We will skip all the fv_idx which stores result
1289 * index in them. We also need to skip any fv_idx which
1290 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1291 * valid offset value.
1293 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1294 rg_entry->fv_idx[i]) ||
1295 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1296 rg_entry->fv_idx[i] == 0)
1299 ice_find_prot_off(hw, ICE_BLK_SW, prof,
1300 rg_entry->fv_idx[i], &prot, &off);
1301 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1302 lkup_exts->fv_words[fv_word_idx].off = off;
1303 lkup_exts->field_mask[fv_word_idx] =
1304 rg_entry->fv_mask[i];
1307 /* populate rg_list with the data from the child entry of this
1310 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1312 /* Propagate some data to the recipe database */
1313 recps[idx].is_root = !!is_root;
1314 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1315 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1316 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1317 recps[idx].chain_idx = root_bufs.content.result_indx &
1318 ~ICE_AQ_RECIPE_RESULT_EN;
1319 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1321 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1327 /* Only do the following for root recipes entries */
1328 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1329 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1330 recps[idx].root_rid = root_bufs.content.rid &
1331 ~ICE_AQ_RECIPE_ID_IS_ROOT;
1332 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1335 /* Complete initialization of the root recipe entry */
1336 lkup_exts->n_val_words = fv_word_idx;
1337 recps[rid].big_recp = (num_recps > 1);
1338 recps[rid].n_grp_count = (u8)num_recps;
1339 recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1340 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1341 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1342 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1343 if (!recps[rid].root_buf)
1346 /* Copy result indexes */
1347 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1348 recps[rid].recp_created = true;
1356 * ice_get_recp_to_prof_map - updates recipe to profile mapping
1357 * @hw: pointer to hardware structure
1359 * This function is used to populate recipe_to_profile matrix where index to
1360 * this array is the recipe ID and the element is the mapping of which profiles
1361 * is this recipe mapped to.
1363 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1365 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Walk every package profile and query FW for its recipe association. */
1368 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
/* Clear any stale mapping before reading the current one. */
1371 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1372 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* NOTE(review): on AQ failure the loop body below appears to be
 * skipped for this profile (continuation elided in this capture) —
 * confirm against full source.
 */
1373 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
/* Cache the forward map (profile -> recipes) ... */
1375 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1376 ICE_MAX_NUM_RECIPES);
/* ... and also populate the reverse map (recipe -> profiles). */
1377 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1378 ice_set_bit(i, recipe_to_profile[j]);
1383 * ice_init_def_sw_recp - initialize the recipe book keeping tables
1384 * @hw: pointer to the HW struct
1385 * @recp_list: pointer to sw recipe list
1387 * Allocate memory for the entire recipe table and initialize the structures/
1388 * entries corresponding to basic recipes.
1391 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1393 struct ice_sw_recipe *recps;
/* One zeroed bookkeeping entry per possible recipe ID. */
1396 recps = (struct ice_sw_recipe *)
1397 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
/* NOTE(review): the allocation-failure check guarding this return is
 * elided in this capture — confirm against full source.
 */
1399 return ICE_ERR_NO_MEMORY;
/* Seed each entry with its own ID and empty rule/group lists. */
1401 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1402 recps[i].root_rid = i;
1403 INIT_LIST_HEAD(&recps[i].filt_rules);
1404 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1405 INIT_LIST_HEAD(&recps[i].rg_list);
1406 ice_init_lock(&recps[i].filt_rule_lock);
1415 * ice_aq_get_sw_cfg - get switch configuration
1416 * @hw: pointer to the hardware structure
1417 * @buf: pointer to the result buffer
1418 * @buf_size: length of the buffer available for response
1419 * @req_desc: pointer to requested descriptor
1420 * @num_elems: pointer to number of elements
1421 * @cd: pointer to command details structure or NULL
1423 * Get switch configuration (0x0200) to be placed in buf.
1424 * This admin command returns information such as initial VSI/port number
1425 * and switch ID it belongs to.
1427 * NOTE: *req_desc is both an input/output parameter.
1428 * The caller of this function first calls this function with *request_desc set
1429 * to 0. If the response from f/w has *req_desc set to 0, all the switch
1430 * configuration information has been returned; if non-zero (meaning not all
1431 * the information was returned), the caller should call this function again
1432 * with *req_desc set to the previous value returned by f/w to get the
1433 * next block of switch configuration information.
1435 * *num_elems is output only parameter. This reflects the number of elements
1436 * in response buffer. The caller of this function to use *num_elems while
1437 * parsing the response buffer.
1439 static enum ice_status
1440 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1441 u16 buf_size, u16 *req_desc, u16 *num_elems,
1442 struct ice_sq_cd *cd)
1444 struct ice_aqc_get_sw_cfg *cmd;
1445 struct ice_aq_desc desc;
1446 enum ice_status status;
1448 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1449 cmd = &desc.params.get_sw_conf;
/* Pass the caller's continuation cookie to FW in little-endian. */
1450 cmd->element = CPU_TO_LE16(*req_desc);
1452 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes back the next continuation cookie and element count.
 * NOTE(review): a success guard around these writes appears elided in
 * this capture — confirm against full source.
 */
1454 *req_desc = LE16_TO_CPU(cmd->element);
1455 *num_elems = LE16_TO_CPU(cmd->num_elems);
1462 * ice_alloc_sw - allocate resources specific to switch
1463 * @hw: pointer to the HW struct
1464 * @ena_stats: true to turn on VEB stats
1465 * @shared_res: true for shared resource, false for dedicated resource
1466 * @sw_id: switch ID returned
1467 * @counter_id: VEB counter ID returned
1469 * allocates switch resources (SWID and VEB counter) (0x0208)
1472 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1475 struct ice_aqc_alloc_free_res_elem *sw_buf;
1476 struct ice_aqc_res_elem *sw_ele;
1477 enum ice_status status;
/* Variable-length AQ buffer sized for a single resource element. */
1480 buf_len = ice_struct_size(sw_buf, elem, 1);
1481 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1483 return ICE_ERR_NO_MEMORY;
1485 /* Prepare buffer for switch ID.
1486 * The number of resource entries in buffer is passed as 1 since only a
1487 * single switch/VEB instance is allocated, and hence a single sw_id
1490 sw_buf->num_elems = CPU_TO_LE16(1);
/* Resource type: SWID, shared or dedicated per the caller's request. */
1492 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1493 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1494 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1496 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1497 ice_aqc_opc_alloc_res, NULL);
1500 goto ice_alloc_sw_exit;
/* Return the SWID FW handed back in the response element. */
1502 sw_ele = &sw_buf->elem[0];
1503 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1506 /* Prepare buffer for VEB Counter */
1507 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1508 struct ice_aqc_alloc_free_res_elem *counter_buf;
1509 struct ice_aqc_res_elem *counter_ele;
/* Second allocation reuses buf_len: same single-element layout. */
1511 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1512 ice_malloc(hw, buf_len);
1514 status = ICE_ERR_NO_MEMORY;
1515 goto ice_alloc_sw_exit;
1518 /* The number of resource entries in buffer is passed as 1 since
1519 * only a single switch/VEB instance is allocated, and hence a
1520 * single VEB counter is requested.
1522 counter_buf->num_elems = CPU_TO_LE16(1);
1523 counter_buf->res_type =
1524 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1525 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1526 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* Counter allocation failed: release its buffer before bailing out. */
1530 ice_free(hw, counter_buf);
1531 goto ice_alloc_sw_exit;
1533 counter_ele = &counter_buf->elem[0];
1534 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1535 ice_free(hw, counter_buf);
/* Common exit: the SWID request buffer is always freed. */
1539 ice_free(hw, sw_buf);
1544 * ice_free_sw - free resources specific to switch
1545 * @hw: pointer to the HW struct
1546 * @sw_id: switch ID returned
1547 * @counter_id: VEB counter ID returned
1549 * free switch resources (SWID and VEB counter) (0x0209)
1551 * NOTE: This function frees multiple resources. It continues
1552 * releasing other resources even after it encounters error.
1553 * The error code returned is the last error it encountered.
1555 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1557 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1558 enum ice_status status, ret_status;
1561 buf_len = ice_struct_size(sw_buf, elem, 1);
1562 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1564 return ICE_ERR_NO_MEMORY;
1566 /* Prepare buffer to free for switch ID res.
1567 * The number of resource entries in buffer is passed as 1 since only a
1568 * single switch/VEB instance is freed, and hence a single sw_id
1571 sw_buf->num_elems = CPU_TO_LE16(1);
1572 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1573 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* First free attempt; its status seeds ret_status (last-error policy). */
1575 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1576 ice_aqc_opc_free_res, NULL);
1579 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1581 /* Prepare buffer to free for VEB Counter resource */
1582 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1583 ice_malloc(hw, buf_len);
1585 ice_free(hw, sw_buf);
1586 return ICE_ERR_NO_MEMORY;
1589 /* The number of resource entries in buffer is passed as 1 since only a
1590 * single switch/VEB instance is freed, and hence a single VEB counter
1593 counter_buf->num_elems = CPU_TO_LE16(1);
1594 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1595 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
/* Attempt the counter free even if the SWID free already failed. */
1597 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1598 ice_aqc_opc_free_res, NULL);
1600 ice_debug(hw, ICE_DBG_SW,
1601 "VEB counter resource could not be freed\n");
1602 ret_status = status;
1605 ice_free(hw, counter_buf);
1606 ice_free(hw, sw_buf);
1612 * @hw: pointer to the HW struct
1613 * @vsi_ctx: pointer to a VSI context struct
1614 * @cd: pointer to command details structure or NULL
1616 * Add a VSI context to the hardware (0x0210)
1619 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1620 struct ice_sq_cd *cd)
1622 struct ice_aqc_add_update_free_vsi_resp *res;
1623 struct ice_aqc_add_get_update_free_vsi *cmd;
1624 struct ice_aq_desc desc;
1625 enum ice_status status;
/* cmd and res alias the same descriptor: request params vs response. */
1627 cmd = &desc.params.vsi_cmd;
1628 res = &desc.params.add_update_free_vsi_res;
1630 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller supplies an explicit VSI number only when not pool-allocated. */
1632 if (!vsi_ctx->alloc_from_pool)
1633 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1634 ICE_AQ_VSI_IS_VALID);
1636 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: this command sends a host buffer (vsi_ctx->info) to FW. */
1638 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1640 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1641 sizeof(vsi_ctx->info), cd);
/* NOTE(review): a success guard before these response reads appears
 * elided in this capture — confirm against full source.
 */
1644 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1645 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1646 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1654 * @hw: pointer to the HW struct
1655 * @vsi_ctx: pointer to a VSI context struct
1656 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1657 * @cd: pointer to command details structure or NULL
1659 * Free VSI context info from hardware (0x0213)
1662 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1663 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1665 struct ice_aqc_add_update_free_vsi_resp *resp;
1666 struct ice_aqc_add_get_update_free_vsi *cmd;
1667 struct ice_aq_desc desc;
1668 enum ice_status status;
1670 cmd = &desc.params.vsi_cmd;
1671 resp = &desc.params.add_update_free_vsi_res;
1673 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1675 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): this flag is presumably set only when keep_vsi_alloc
 * is true (the guarding condition is elided in this capture) — confirm.
 */
1677 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1679 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh the caller's view of global VSI usage from the response. */
1681 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1682 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1690 * @hw: pointer to the HW struct
1691 * @vsi_ctx: pointer to a VSI context struct
1692 * @cd: pointer to command details structure or NULL
1694 * Update VSI context in the hardware (0x0211)
1697 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1698 struct ice_sq_cd *cd)
1700 struct ice_aqc_add_update_free_vsi_resp *resp;
1701 struct ice_aqc_add_get_update_free_vsi *cmd;
1702 struct ice_aq_desc desc;
1703 enum ice_status status;
1705 cmd = &desc.params.vsi_cmd;
1706 resp = &desc.params.add_update_free_vsi_res;
1708 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1710 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: the updated context (vsi_ctx->info) is sent down to FW. */
1712 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1714 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1715 sizeof(vsi_ctx->info), cd);
/* Refresh the caller's view of global VSI usage from the response. */
1718 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1719 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1726 * ice_is_vsi_valid - check whether the VSI is valid or not
1727 * @hw: pointer to the HW struct
1728 * @vsi_handle: VSI handle
1730 * check whether the VSI is valid or not
1732 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* A handle is valid when it is in range and a context has been saved. */
1734 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1738 * ice_get_hw_vsi_num - return the HW VSI number
1739 * @hw: pointer to the HW struct
1740 * @vsi_handle: VSI handle
1742 * return the HW VSI number
1743 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1745 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* Unchecked dereference by design — see the Caution note above. */
1747 return hw->vsi_ctx[vsi_handle]->vsi_num;
1751 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1752 * @hw: pointer to the HW struct
1753 * @vsi_handle: VSI handle
1755 * return the VSI context entry for a given VSI handle
1757 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* Out-of-range handles yield NULL rather than an OOB array access. */
1759 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1763 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1764 * @hw: pointer to the HW struct
1765 * @vsi_handle: VSI handle
1766 * @vsi: VSI context pointer
1768 * save the VSI context entry for a given VSI handle
1771 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* Ownership of *vsi transfers to the table; ice_clear_vsi_ctx frees it. */
1773 hw->vsi_ctx[vsi_handle] = vsi;
1777 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1778 * @hw: pointer to the HW struct
1779 * @vsi_handle: VSI handle
1781 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1783 struct ice_vsi_ctx *vsi;
1786 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Free the per-TC LAN queue context arrays and null the pointers so a
 * later clear cannot double-free.
 */
1789 ice_for_each_traffic_class(i) {
1790 if (vsi->lan_q_ctx[i]) {
1791 ice_free(hw, vsi->lan_q_ctx[i]);
1792 vsi->lan_q_ctx[i] = NULL;
1798 * ice_clear_vsi_ctx - clear the VSI context entry
1799 * @hw: pointer to the HW struct
1800 * @vsi_handle: VSI handle
1802 * clear the VSI context entry
1804 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1806 struct ice_vsi_ctx *vsi;
1808 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* Release queue contexts first, then drop the table entry itself. */
1810 ice_clear_vsi_q_ctx(hw, vsi_handle);
1812 hw->vsi_ctx[vsi_handle] = NULL;
1817 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1818 * @hw: pointer to the HW struct
1820 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* Tear down every possible handle; ice_clear_vsi_ctx tolerates holes. */
1824 for (i = 0; i < ICE_MAX_VSI; i++)
1825 ice_clear_vsi_ctx(hw, i);
1829 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1830 * @hw: pointer to the HW struct
1831 * @vsi_handle: unique VSI handle provided by drivers
1832 * @vsi_ctx: pointer to a VSI context struct
1833 * @cd: pointer to command details structure or NULL
1835 * Add a VSI context to the hardware also add it into the VSI handle list.
1836 * If this function gets called after reset for existing VSIs then update
1837 * with the new HW VSI number in the corresponding VSI handle list entry.
1840 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1841 struct ice_sq_cd *cd)
1843 struct ice_vsi_ctx *tmp_vsi_ctx;
1844 enum ice_status status;
1846 if (vsi_handle >= ICE_MAX_VSI)
1847 return ICE_ERR_PARAM;
/* Program the VSI in hardware first, then record it locally. */
1848 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1851 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1853 /* Create a new VSI context */
1854 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1855 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* Allocation failed: undo the hardware add to avoid a leaked VSI. */
1857 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1858 return ICE_ERR_NO_MEMORY;
/* Take a copy; the caller's vsi_ctx is not retained. */
1860 *tmp_vsi_ctx = *vsi_ctx;
1862 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1864 /* update with new HW VSI num */
1865 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1872 * ice_free_vsi- free VSI context from hardware and VSI handle list
1873 * @hw: pointer to the HW struct
1874 * @vsi_handle: unique VSI handle
1875 * @vsi_ctx: pointer to a VSI context struct
1876 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1877 * @cd: pointer to command details structure or NULL
1879 * Free VSI context info from hardware as well as from VSI handle list
1882 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1883 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1885 enum ice_status status;
1887 if (!ice_is_vsi_valid(hw, vsi_handle))
1888 return ICE_ERR_PARAM;
/* Translate the driver handle to the HW VSI number before the AQ call. */
1889 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1890 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* Drop the local bookkeeping entry (on success per the elided guard). */
1892 ice_clear_vsi_ctx(hw, vsi_handle);
1898 * @hw: pointer to the HW struct
1899 * @vsi_handle: unique VSI handle
1900 * @vsi_ctx: pointer to a VSI context struct
1901 * @cd: pointer to command details structure or NULL
1903 * Update VSI context in the hardware
1906 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1907 struct ice_sq_cd *cd)
1909 if (!ice_is_vsi_valid(hw, vsi_handle))
1910 return ICE_ERR_PARAM;
/* Resolve handle -> HW VSI number, then delegate to the AQ wrapper. */
1911 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1912 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1916 * ice_aq_get_vsi_params
1917 * @hw: pointer to the HW struct
1918 * @vsi_ctx: pointer to a VSI context struct
1919 * @cd: pointer to command details structure or NULL
1921 * Get VSI context info from hardware (0x0212)
1924 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1925 struct ice_sq_cd *cd)
1927 struct ice_aqc_add_get_update_free_vsi *cmd;
1928 struct ice_aqc_get_vsi_resp *resp;
1929 struct ice_aq_desc desc;
1930 enum ice_status status;
1932 cmd = &desc.params.vsi_cmd;
1933 resp = &desc.params.get_vsi_resp;
1935 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1937 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* FW fills vsi_ctx->info with the current context on success. */
1939 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1940 sizeof(vsi_ctx->info), cd);
/* Mask off the valid bit to recover the bare VSI number. */
1942 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1944 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1945 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1952 * ice_aq_add_update_mir_rule - add/update a mirror rule
1953 * @hw: pointer to the HW struct
1954 * @rule_type: Rule Type
1955 * @dest_vsi: VSI number to which packets will be mirrored
1956 * @count: length of the list
1957 * @mr_buf: buffer for list of mirrored VSI numbers
1958 * @cd: pointer to command details structure or NULL
1961 * Add/Update Mirror Rule (0x260).
1964 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1965 u16 count, struct ice_mir_rule_buf *mr_buf,
1966 struct ice_sq_cd *cd, u16 *rule_id)
1968 struct ice_aqc_add_update_mir_rule *cmd;
1969 struct ice_aq_desc desc;
1970 enum ice_status status;
1971 __le16 *mr_list = NULL;
/* Validate argument combinations per rule type before touching HW. */
1974 switch (rule_type) {
1975 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1976 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1977 /* Make sure count and mr_buf are set for these rule_types */
1978 if (!(count && mr_buf))
1979 return ICE_ERR_PARAM;
/* VPORT rules carry an indirect buffer of VSI entries. */
1981 buf_size = count * sizeof(__le16);
1982 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1984 return ICE_ERR_NO_MEMORY;
1986 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1987 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1988 /* Make sure count and mr_buf are not set for these
1991 if (count || mr_buf)
1992 return ICE_ERR_PARAM;
1995 ice_debug(hw, ICE_DBG_SW,
1996 "Error due to unsupported rule_type %u\n", rule_type);
1997 return ICE_ERR_OUT_OF_RANGE;
2000 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2002 /* Pre-process 'mr_buf' items for add/update of virtual port
2003 * ingress/egress mirroring (but not physical port ingress/egress
2009 for (i = 0; i < count; i++) {
/* Only the VSI-index bits of the entry are meaningful here. */
2012 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2014 /* Validate specified VSI number, make sure it is less
2015 * than ICE_MAX_VSI, if not return with error.
2017 if (id >= ICE_MAX_VSI) {
2018 ice_debug(hw, ICE_DBG_SW,
2019 "Error VSI index (%u) out-of-range\n",
/* Bail out without leaking the entry list. */
2021 ice_free(hw, mr_list);
2022 return ICE_ERR_OUT_OF_RANGE;
2025 /* add VSI to mirror rule */
2028 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2029 else /* remove VSI from mirror rule */
2030 mr_list[i] = CPU_TO_LE16(id);
2034 cmd = &desc.params.add_update_rule;
/* An existing rule ID means "update"; flag it valid for FW. */
2035 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2036 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2037 ICE_AQC_RULE_ID_VALID_M);
2038 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2039 cmd->num_entries = CPU_TO_LE16(count);
2040 cmd->dest = CPU_TO_LE16(dest_vsi);
2042 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* FW assigns/echoes the rule ID; pass it back to the caller. */
2044 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2046 ice_free(hw, mr_list);
2052 * ice_aq_delete_mir_rule - delete a mirror rule
2053 * @hw: pointer to the HW struct
2054 * @rule_id: Mirror rule ID (to be deleted)
2055 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2056 * otherwise it is returned to the shared pool
2057 * @cd: pointer to command details structure or NULL
2059 * Delete Mirror Rule (0x261).
2062 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2063 struct ice_sq_cd *cd)
2065 struct ice_aqc_delete_mir_rule *cmd;
2066 struct ice_aq_desc desc;
2068 /* rule_id should be in the range 0...63 */
2069 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2070 return ICE_ERR_OUT_OF_RANGE;
2072 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2074 cmd = &desc.params.del_rule;
/* FW requires the valid bit set alongside the rule ID. */
2075 rule_id |= ICE_AQC_RULE_ID_VALID_M;
2076 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): presumably set only when keep_allocd is true (the
 * guarding condition is elided in this capture) — confirm.
 */
2079 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2081 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2085 * ice_aq_alloc_free_vsi_list
2086 * @hw: pointer to the HW struct
2087 * @vsi_list_id: VSI list ID returned or used for lookup
2088 * @lkup_type: switch rule filter lookup type
2089 * @opc: switch rules population command type - pass in the command opcode
2091 * allocates or free a VSI list resource
2093 static enum ice_status
2094 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2095 enum ice_sw_lkup_type lkup_type,
2096 enum ice_adminq_opc opc)
2098 struct ice_aqc_alloc_free_res_elem *sw_buf;
2099 struct ice_aqc_res_elem *vsi_ele;
2100 enum ice_status status;
2103 buf_len = ice_struct_size(sw_buf, elem, 1);
2104 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2106 return ICE_ERR_NO_MEMORY;
2107 sw_buf->num_elems = CPU_TO_LE16(1);
/* Pick the resource pool from the lookup type: replication lists for
 * most lookups, pruning lists for VLAN; anything else is a bad param.
 */
2109 if (lkup_type == ICE_SW_LKUP_MAC ||
2110 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2111 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2112 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2113 lkup_type == ICE_SW_LKUP_PROMISC ||
2114 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2115 lkup_type == ICE_SW_LKUP_LAST) {
2116 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2117 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2119 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2121 status = ICE_ERR_PARAM;
2122 goto ice_aq_alloc_free_vsi_list_exit;
/* Freeing: the caller names the list to release. */
2125 if (opc == ice_aqc_opc_free_res)
2126 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2128 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2130 goto ice_aq_alloc_free_vsi_list_exit;
/* Allocating: FW returns the new list ID in the response element. */
2132 if (opc == ice_aqc_opc_alloc_res) {
2133 vsi_ele = &sw_buf->elem[0];
2134 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2137 ice_aq_alloc_free_vsi_list_exit:
2138 ice_free(hw, sw_buf);
2143 * ice_aq_set_storm_ctrl - Sets storm control configuration
2144 * @hw: pointer to the HW struct
2145 * @bcast_thresh: represents the upper threshold for broadcast storm control
2146 * @mcast_thresh: represents the upper threshold for multicast storm control
2147 * @ctl_bitmask: storm control control knobs
2149 * Sets the storm control configuration (0x0280)
2152 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2155 struct ice_aqc_storm_cfg *cmd;
2156 struct ice_aq_desc desc;
2158 cmd = &desc.params.storm_conf;
2160 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* Thresholds are masked to the field width before being sent to FW. */
2162 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2163 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2164 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2166 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2170 * ice_aq_get_storm_ctrl - gets storm control configuration
2171 * @hw: pointer to the HW struct
2172 * @bcast_thresh: represents the upper threshold for broadcast storm control
2173 * @mcast_thresh: represents the upper threshold for multicast storm control
2174 * @ctl_bitmask: storm control control knobs
2176 * Gets the storm control configuration (0x0281)
2179 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2182 enum ice_status status;
2183 struct ice_aq_desc desc;
2185 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2187 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* On success, unpack the response; output pointers may be optional
 * (per-pointer guards appear elided in this capture — confirm).
 */
2189 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2192 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2195 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2198 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2205 * ice_aq_sw_rules - add/update/remove switch rules
2206 * @hw: pointer to the HW struct
2207 * @rule_list: pointer to switch rule population list
2208 * @rule_list_sz: total size of the rule list in bytes
2209 * @num_rules: number of switch rules in the rule_list
2210 * @opc: switch rules population command type - pass in the command opcode
2211 * @cd: pointer to command details structure or NULL
2213 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2215 static enum ice_status
2216 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2217 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2219 struct ice_aq_desc desc;
2220 enum ice_status status;
2222 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Only the three rule opcodes are legal through this wrapper. */
2224 if (opc != ice_aqc_opc_add_sw_rules &&
2225 opc != ice_aqc_opc_update_sw_rules &&
2226 opc != ice_aqc_opc_remove_sw_rules)
2227 return ICE_ERR_PARAM;
2229 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: rule_list is a host buffer read by firmware. */
2231 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2232 desc.params.sw_rules.num_rules_fltr_entry_index =
2233 CPU_TO_LE16(num_rules);
2234 status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
/* Map ENOENT on update/remove to a driver-level "does not exist". */
2235 if (opc != ice_aqc_opc_add_sw_rules &&
2236 hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2237 status = ICE_ERR_DOES_NOT_EXIST;
2243 * ice_aq_add_recipe - add switch recipe
2244 * @hw: pointer to the HW struct
2245 * @s_recipe_list: pointer to switch rule population list
2246 * @num_recipes: number of switch recipes in the list
2247 * @cd: pointer to command details structure or NULL
2252 ice_aq_add_recipe(struct ice_hw *hw,
2253 struct ice_aqc_recipe_data_elem *s_recipe_list,
2254 u16 num_recipes, struct ice_sq_cd *cd)
2256 struct ice_aqc_add_get_recipe *cmd;
2257 struct ice_aq_desc desc;
2260 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2261 cmd = &desc.params.add_get_recipe;
2262 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2264 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: the recipe array is a host buffer consumed by firmware. */
2265 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2267 buf_size = num_recipes * sizeof(*s_recipe_list);
2269 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2273 * ice_aq_get_recipe - get switch recipe
2274 * @hw: pointer to the HW struct
2275 * @s_recipe_list: pointer to switch rule population list
2276 * @num_recipes: pointer to the number of recipes (input and output)
2277 * @recipe_root: root recipe number of recipe(s) to retrieve
2278 * @cd: pointer to command details structure or NULL
2282 * On input, *num_recipes should equal the number of entries in s_recipe_list.
2283 * On output, *num_recipes will equal the number of entries returned in
2286 * The caller must supply enough space in s_recipe_list to hold all possible
2287 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2290 ice_aq_get_recipe(struct ice_hw *hw,
2291 struct ice_aqc_recipe_data_elem *s_recipe_list,
2292 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2294 struct ice_aqc_add_get_recipe *cmd;
2295 struct ice_aq_desc desc;
2296 enum ice_status status;
/* Enforce the full-size buffer contract documented above. */
2299 if (*num_recipes != ICE_MAX_NUM_RECIPES)
2300 return ICE_ERR_PARAM;
2302 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2303 cmd = &desc.params.add_get_recipe;
2304 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2306 cmd->return_index = CPU_TO_LE16(recipe_root);
2307 cmd->num_sub_recipes = 0;
2309 buf_size = *num_recipes * sizeof(*s_recipe_list);
2311 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2312 /* cppcheck-suppress constArgument */
/* FW reports how many sub-recipe entries it actually returned. */
2313 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2319 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2320 * @hw: pointer to the HW struct
2321 * @profile_id: package profile ID to associate the recipe with
2322 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2323 * @cd: pointer to command details structure or NULL
2324 * Recipe to profile association (0x0291)
2327 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2328 struct ice_sq_cd *cd)
2330 struct ice_aqc_recipe_to_profile *cmd;
2331 struct ice_aq_desc desc;
2333 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2334 cmd = &desc.params.recipe_to_profile;
2335 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2336 cmd->profile_id = CPU_TO_LE16(profile_id);
2337 /* Set the recipe ID bit in the bitmask to let the device know which
2338 * profile we are associating the recipe to
2340 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2341 ICE_NONDMA_TO_NONDMA);
/* Direct command: the association travels in the descriptor itself. */
2343 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2347 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2348 * @hw: pointer to the HW struct
2349 * @profile_id: package profile ID to associate the recipe with
2350 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2351 * @cd: pointer to command details structure or NULL
2352 * Associate profile ID with given recipe (0x0293)
2355 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2356 struct ice_sq_cd *cd)
2358 struct ice_aqc_recipe_to_profile *cmd;
2359 struct ice_aq_desc desc;
2360 enum ice_status status;
2362 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2363 cmd = &desc.params.recipe_to_profile;
2364 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2365 cmd->profile_id = CPU_TO_LE16(profile_id);
2367 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the association bitmap FW wrote into the descriptor back out. */
2369 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2370 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2376 * ice_alloc_recipe - add recipe resource
2377 * @hw: pointer to the hardware structure
2378 * @rid: recipe ID returned as response to AQ call
2380 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2382 struct ice_aqc_alloc_free_res_elem *sw_buf;
2383 enum ice_status status;
2386 buf_len = ice_struct_size(sw_buf, elem, 1);
2387 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2389 return ICE_ERR_NO_MEMORY;
2391 sw_buf->num_elems = CPU_TO_LE16(1);
/* Recipes are allocated from the shared pool; the type field needs the
 * explicit ICE_AQC_RES_TYPE_S shift, unlike the pre-shifted SWID type.
 */
2392 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2393 ICE_AQC_RES_TYPE_S) |
2394 ICE_AQC_RES_TYPE_FLAG_SHARED);
2395 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2396 ice_aqc_opc_alloc_res, NULL);
/* FW hands back the newly allocated recipe ID. */
2398 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2399 ice_free(hw, sw_buf);
/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		/* physical port: record logical port number and ownership,
		 * and mark the default Tx/Rx VSIs as not yet assigned
		 */
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->pf_vf_num = pf_vf_num;
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		/* any other element type is unexpected here */
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	/* this driver instance only manages a single port */
	num_total_ports = 1;
	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
		return ICE_ERR_NO_MEMORY;
	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
		struct ice_aqc_get_sw_cfg_resp_elem *ele;
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);
		/* walk every response element returned by this call */
		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			/* decode the packed fields of the response element */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
			swid = LE16_TO_CPU(ele->swid);
			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				/* refuse to record more ports than this
				 * function expects to manage
				 */
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW,
						  "more ports than expected\n");
					status = ICE_ERR_CFG;
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
	/* loop until firmware stops writing a follow-up descriptor */
	} while (req_desc && !status);
	ice_free(hw, (void *)rbuf);
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
/* NOTE(review): the fi->lb_en / fi->lan_en assignment lines appear elided
 * in this excerpt; only the selector conditions remain — verify upstream.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
	/* Rx forwarding rules with the special LAST lookup type */
	if ((fi->flag & ICE_FLTR_RX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
	    fi->lkup_type == ICE_SW_LKUP_LAST)
	/* Tx rules with a forwarding action */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
		    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
		    fi->lkup_type == ICE_SW_LKUP_DFLT ||
		    fi->lkup_type == ICE_SW_LKUP_VLAN ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
/* Builds a lookup Tx/Rx switch-rule element: action bits from the filter's
 * forwarding action, a dummy Ethernet header patched with the lookup's
 * DA/ethertype/VLAN, and the recipe/src/act descriptor fields.
 */
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
	/* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	/* removal only needs the rule index; no header or action required */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);
	/* translate the forwarding action into single-action bits */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_QGRP:
		/* queue-group size is encoded as a power-of-two region */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
	/* NOTE(review): guards (likely "if (f_info->lb_en)" /
	 * "if (f_info->lan_en)") appear elided before these two lines.
	 */
	act |= ICE_SINGLE_ACT_LB_ENABLE;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* pick up DA / VLAN / ethertype values per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
	case ICE_SW_LKUP_ETHERTYPE:
		/* patch ethertype directly into the dummy header */
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
		   ICE_NONDMA_TO_NONDMA);
	/* only program a VLAN TCI when the sentinel was overwritten above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
		return ICE_ERR_NO_MEMORY;
	/* the lookup rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs are subscribed to this filter
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;
	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);
	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));
	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	/* record marker bookkeeping on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	ice_free(hw, lg_act);
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	/* counters are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
		return ICE_ERR_NO_MEMORY;
	/* the lookup rule follows the large action in the same buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);
	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs are subscribed to this filter
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;
	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);
	/* point the lookup rule's single action at the large action */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	/* record counter bookkeeping on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	ice_free(hw, lg_act);
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
	v_map->vsi_list_id = vsi_list_id;
	/* mark each supplied VSI handle in the map's bitmap */
	for (i = 0; i < num_vsi; i++)
		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
	/* track the mapping on the switch-wide list */
	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
		return ICE_ERR_PARAM;
	/* VLAN rules use prune lists; every other lookup type uses a
	 * plain VSI list set/clear
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
		return ICE_ERR_PARAM;
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	s_rule->type = CPU_TO_LE16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
	ice_free(hw, s_rule);
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
static enum ice_status
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
	enum ice_status status;
	/* allocate a VSI list resource; firmware returns its ID */
	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @recp_list: corresponding filter management list
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(*fm_entry));
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	fm_entry->fltr_info = f_entry->fltr_info;
	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	/* firmware assigned the rule ID; record it in both the caller's
	 * entry and the management entry
	 */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * new filter configuration
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
		return ICE_ERR_NO_MEMORY;
	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
	/* updates address an existing rule by its firmware-assigned index */
	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);
	ice_free(hw, s_rule);
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	/* walk all MAC filter rules under the rule lock */
	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;
		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
	ice_release_lock(rule_lock);
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;
	/* queue/queue-group forwarding cannot be combined with VSI lists */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;
	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;
		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  new_fltr->lkup_type);
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* the bookkeeping entry now forwards to the new VSI list */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
				ice_add_marker_act(hw, m_entry,
						   m_entry->sw_marker_id,
						   m_entry->lg_act_idx);
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;
		if (!m_entry->vsi_list_info)
		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;
		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
		m_entry->vsi_count++;
 * ice_find_rule_entry - Search a rule entry
 * @list_head: head of rule list
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
		    struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
		/* a match requires identical lookup data AND identical
		 * direction/flag bits
		 */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @recp_list: VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
	struct ice_vsi_list_map_info *map_info = NULL;
	struct LIST_HEAD_TYPE *list_head;
	list_head = &recp_list->filt_rules;
	/* advanced-rule recipes keep a different entry type on the list */
	if (recp_list->adv_rule) {
		struct ice_adv_fltr_mgmt_list_entry *list_itr;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
			if (list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
					*vsi_list_id = map_info->vsi_list_id;
		struct ice_fltr_mgmt_list_entry *list_itr;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_fltr_mgmt_list_entry,
			/* only single-VSI lists are matched here (see above) */
			if (list_itr->vsi_count == 1 &&
			    list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
					*vsi_list_id = map_info->vsi_list_id;
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @lport: logic port number on which function add rule
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      u8 lport, struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
		f_entry->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules match on the receiving port; Tx rules on the source VSI */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	/* no existing rule: create one; otherwise fold the new VSI into
	 * the existing rule's VSI list
	 */
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		goto exit_add_rule_internal;
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
exit_add_rule_internal:
	ice_release_lock(rule_lock);
 * ice_remove_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @lkup_type: switch rule filter lookup type
 *
 * The VSI list should be emptied before this function is called to remove the
 * VSI list resource
static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
			 enum ice_sw_lkup_type lkup_type)
	/* Free the vsi_list resource that we allocated. It is assumed that the
	 * list is empty at this point.
	 */
	return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					  ice_aqc_opc_free_res);
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	/* only VSI-list rules with at least one member can be trimmed */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;
	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;
	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* clear this VSI from the firmware VSI list */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	/* for non-VLAN rules, a list with a single remaining VSI is
	 * converted back to a direct forward-to-VSI rule
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;
		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
		fm_list->fltr_info = tmp_fltr_info;
	/* free the now-unused VSI list resource and its bookkeeping */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
3518  * ice_remove_rule_internal - Remove a filter rule of a given type
3520  * @hw: pointer to the hardware structure
3521  * @recp_list: recipe list for which the rule needs to removed
3522  * @f_entry: rule entry containing filter information
/* Looks the rule up in the recipe's filter list under the list lock,
 * unwinds any VSI-list sharing (ref_cnt) and, when the rule is no longer
 * referenced, sends the remove-switch-rule AQ command and frees the
 * book-keeping entry.
 */
3524 static enum ice_status
3525 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3526 			 struct ice_fltr_list_entry *f_entry)
3528 	struct ice_fltr_mgmt_list_entry *list_elem;
3529 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3530 	enum ice_status status = ICE_SUCCESS;
3531 	bool remove_rule = false;
3534 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3535 		return ICE_ERR_PARAM;
3536 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3537 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3539 	rule_lock = &recp_list->filt_rule_lock;
3540 	ice_acquire_lock(rule_lock);
3541 	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3542 					&f_entry->fltr_info);
3544 		status = ICE_ERR_DOES_NOT_EXIST;
3548 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3550 	} else if (!list_elem->vsi_list_info) {
/* Rule claims list-based forwarding but has no list attached. */
3551 		status = ICE_ERR_DOES_NOT_EXIST;
3553 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3554 		/* a ref_cnt > 1 indicates that the vsi_list is being
3555 		 * shared by multiple rules. Decrement the ref_cnt and
3556 		 * remove this rule, but do not modify the list, as it
3557 		 * is in-use by other rules.
3559 		list_elem->vsi_list_info->ref_cnt--;
3562 		/* a ref_cnt of 1 indicates the vsi_list is only used
3563 		 * by one rule. However, the original removal request is only
3564 		 * for a single VSI. Update the vsi_list first, and only
3565 		 * remove the rule if there are no further VSIs in this list.
3567 		vsi_handle = f_entry->fltr_info.vsi_handle;
3568 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3571 		/* if VSI count goes to zero after updating the VSI list */
3572 		if (list_elem->vsi_count == 0)
3577 		/* Remove the lookup rule */
3578 		struct ice_aqc_sw_rules_elem *s_rule;
3580 		s_rule = (struct ice_aqc_sw_rules_elem *)
3581 			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3583 			status = ICE_ERR_NO_MEMORY;
3587 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3588 				 ice_aqc_opc_remove_sw_rules);
3590 		status = ice_aq_sw_rules(hw, s_rule,
3591 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3592 					 ice_aqc_opc_remove_sw_rules, NULL);
3594 		/* Remove a book keeping from the list */
3595 		ice_free(hw, s_rule);
3600 		LIST_DEL(&list_elem->list_entry);
3601 		ice_free(hw, list_elem);
3604 	ice_release_lock(rule_lock);
3609  * ice_aq_get_res_alloc - get allocated resources
3610  * @hw: pointer to the HW struct
3611  * @num_entries: pointer to u16 to store the number of resource entries returned
3612  * @buf: pointer to buffer
3613  * @buf_size: size of buf
3614  * @cd: pointer to command details structure or NULL
3616  * The caller-supplied buffer must be large enough to store the resource
3617  * information for all resource types. Each resource type is an
3618  * ice_aqc_get_res_resp_elem structure.
/* Sends the get-resource-allocation AQ command; on success optionally
 * reports how many response elements FW returned.
 */
3621 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3622 		     struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3623 		     struct ice_sq_cd *cd)
3625 	struct ice_aqc_get_res_alloc *resp;
3626 	enum ice_status status;
3627 	struct ice_aq_desc desc;
3630 		return ICE_ERR_BAD_PTR;
3632 	if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3633 		return ICE_ERR_INVAL_SIZE;
3635 	resp = &desc.params.get_res;
3637 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3638 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only written when the command succeeded. */
3640 	if (!status && num_entries)
3641 		*num_entries = LE16_TO_CPU(resp->resp_elem_num);
3647  * ice_aq_get_res_descs - get allocated resource descriptors
3648  * @hw: pointer to the hardware structure
3649  * @num_entries: number of resource entries in buffer
3650  * @buf: structure to hold response data buffer
3651  * @buf_size: size of buffer
3652  * @res_type: resource type
3653  * @res_shared: is resource shared
3654  * @desc_id: input - first desc ID to start; output - next desc ID
3655  * @cd: pointer to command details structure or NULL
/* Sends the get-allocated-resource-descriptors AQ command and advances
 * *desc_id to the next descriptor FW reports, enabling paged retrieval.
 */
3658 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3659 		     struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3660 		     bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3662 	struct ice_aqc_get_allocd_res_desc *cmd;
3663 	struct ice_aq_desc desc;
3664 	enum ice_status status;
3666 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3668 	cmd = &desc.params.get_res_desc;
3671 		return ICE_ERR_PARAM;
/* The buffer must hold exactly num_entries response elements. */
3673 	if (buf_size != (num_entries * sizeof(*buf)))
3674 		return ICE_ERR_PARAM;
3676 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared flag into the command's res field. */
3678 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3679 					 ICE_AQC_RES_TYPE_M) | (res_shared ?
3680 					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3681 	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3683 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* NOTE(review): next_desc appears to be stored unconditionally here;
 * presumably only meaningful when status == ICE_SUCCESS — confirm.
 */
3685 	*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3691  * ice_add_mac_rule - Add a MAC address based filter rule
3692  * @hw: pointer to the hardware structure
3693  * @m_list: list of MAC addresses and forwarding information
3694  * @sw: pointer to switch info struct for which function add rule
3695  * @lport: logic port number on which function add rule
3697  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3698  * multiple unicast addresses, the function assumes that all the
3699  * addresses are unique in a given add_mac call. It doesn't
3700  * check for duplicates in this case, removing duplicates from a given
3701  * list should be taken care of in the caller of this function.
/* Two-phase add: multicast (and shared-unicast) entries go through the
 * generic per-rule path (ice_add_rule_internal); exclusive unicast entries
 * are batched into one buffer and programmed with bulk add-switch-rules AQ
 * calls, then tracked with freshly allocated book-keeping entries.
 */
3703 static enum ice_status
3704 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3705 		 struct ice_switch_info *sw, u8 lport)
3707 	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3708 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3709 	struct ice_fltr_list_entry *m_list_itr;
3710 	struct LIST_HEAD_TYPE *rule_head;
3711 	u16 total_elem_left, s_rule_size;
3712 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3713 	enum ice_status status = ICE_SUCCESS;
3714 	u16 num_unicast = 0;
3718 	rule_lock = &recp_list->filt_rule_lock;
3719 	rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; dispatch multicast/shared-unicast
 * immediately, count exclusive unicast entries for the bulk pass.
 */
3721 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3723 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3727 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3728 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
3729 		if (!ice_is_vsi_valid(hw, vsi_handle))
3730 			return ICE_ERR_PARAM;
3731 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3732 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3733 		/* update the src in case it is VSI num */
3734 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3735 			return ICE_ERR_PARAM;
3736 		m_list_itr->fltr_info.src = hw_vsi_id;
3737 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3738 		    IS_ZERO_ETHER_ADDR(add))
3739 			return ICE_ERR_PARAM;
3740 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3741 			/* Don't overwrite the unicast address */
3742 			ice_acquire_lock(rule_lock);
3743 			if (ice_find_rule_entry(rule_head,
3744 						&m_list_itr->fltr_info)) {
3745 				ice_release_lock(rule_lock);
3746 				return ICE_ERR_ALREADY_EXISTS;
3748 			ice_release_lock(rule_lock);
3750 		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
3751 			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3752 			m_list_itr->status =
3753 				ice_add_rule_internal(hw, recp_list, lport,
3755 			if (m_list_itr->status)
3756 				return m_list_itr->status;
/* Lock held from here through ice_add_mac_exit while the shared rule
 * list is read and extended.
 */
3760 	ice_acquire_lock(rule_lock);
3761 	/* Exit if no suitable entries were found for adding bulk switch rule */
3763 		status = ICE_SUCCESS;
3764 		goto ice_add_mac_exit;
3767 	/* Allocate switch rule buffer for the bulk update for unicast */
3768 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3769 	s_rule = (struct ice_aqc_sw_rules_elem *)
3770 		ice_calloc(hw, num_unicast, s_rule_size);
3772 		status = ICE_ERR_NO_MEMORY;
3773 		goto ice_add_mac_exit;
/* Pass 2: serialize each exclusive-unicast rule into the bulk buffer. */
3777 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3779 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3780 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3782 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3783 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3784 					 ice_aqc_opc_add_sw_rules);
3785 			r_iter = (struct ice_aqc_sw_rules_elem *)
3786 				((u8 *)r_iter + s_rule_size);
3790 	/* Call AQ bulk switch rule update for all unicast addresses */
3792 	/* Call AQ switch rule in AQ_MAX chunk */
3793 	for (total_elem_left = num_unicast; total_elem_left > 0;
3794 	     total_elem_left -= elem_sent) {
3795 		struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Chunk size is bounded by the maximum AQ buffer length. */
3797 		elem_sent = MIN_T(u8, total_elem_left,
3798 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3799 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3800 					 elem_sent, ice_aqc_opc_add_sw_rules,
3803 			goto ice_add_mac_exit;
3804 		r_iter = (struct ice_aqc_sw_rules_elem *)
3805 			((u8 *)r_iter + (elem_sent * s_rule_size));
3808 	/* Fill up rule ID based on the value returned from FW */
/* Pass 3: harvest FW-assigned rule IDs and create tracking entries. */
3810 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3812 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3813 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3814 		struct ice_fltr_mgmt_list_entry *fm_entry;
3816 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3817 			f_info->fltr_rule_id =
3818 				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3819 			f_info->fltr_act = ICE_FWD_TO_VSI;
3820 			/* Create an entry to track this MAC address */
3821 			fm_entry = (struct ice_fltr_mgmt_list_entry *)
3822 				ice_malloc(hw, sizeof(*fm_entry));
3824 				status = ICE_ERR_NO_MEMORY;
3825 				goto ice_add_mac_exit;
3827 			fm_entry->fltr_info = *f_info;
3828 			fm_entry->vsi_count = 1;
3829 			/* The book keeping entries will get removed when
3830 			 * base driver calls remove filter AQ command
3833 			LIST_ADD(&fm_entry->list_entry, rule_head);
3834 			r_iter = (struct ice_aqc_sw_rules_elem *)
3835 				((u8 *)r_iter + s_rule_size);
3840 	ice_release_lock(rule_lock);
3842 		ice_free(hw, s_rule);
3847  * ice_add_mac - Add a MAC address based filter rule
3848  * @hw: pointer to the hardware structure
3849  * @m_list: list of MAC addresses and forwarding information
3851  * Function add MAC rule for logical port from HW struct
/* Public wrapper: delegates to ice_add_mac_rule using the HW's switch
 * info and the logical port from hw->port_info.
 */
3853 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3856 		return ICE_ERR_PARAM;
3858 	return ice_add_mac_rule(hw, m_list, hw->switch_info,
3859 				hw->port_info->lport);
3863  * ice_add_vlan_internal - Add one VLAN based filter rule
3864  * @hw: pointer to the hardware structure
3865  * @recp_list: recipe list for which rule has to be added
3866  * @f_entry: filter entry containing one VLAN information
/* VLAN rules always forward to a VSI list. Three cases are handled under
 * the rule lock: (1) no rule for this VLAN yet — reuse or create a VSI
 * list and create the packet-forwarding rule; (2) rule exists and its list
 * is unshared — append the VSI to the existing list; (3) rule exists but
 * its list is shared — build a new two-VSI list and repoint the rule.
 */
3868 static enum ice_status
3869 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3870 		      struct ice_fltr_list_entry *f_entry)
3872 	struct ice_fltr_mgmt_list_entry *v_list_itr;
3873 	struct ice_fltr_info *new_fltr, *cur_fltr;
3874 	enum ice_sw_lkup_type lkup_type;
3875 	u16 vsi_list_id = 0, vsi_handle;
3876 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3877 	enum ice_status status = ICE_SUCCESS;
3879 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3880 		return ICE_ERR_PARAM;
3882 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3883 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3884 	new_fltr = &f_entry->fltr_info;
3886 	/* VLAN ID should only be 12 bits */
3887 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3888 		return ICE_ERR_PARAM;
3890 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
3891 		return ICE_ERR_PARAM;
3893 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3894 	lkup_type = new_fltr->lkup_type;
3895 	vsi_handle = new_fltr->vsi_handle;
3896 	rule_lock = &recp_list->filt_rule_lock;
3897 	ice_acquire_lock(rule_lock);
3898 	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3900 		struct ice_vsi_list_map_info *map_info = NULL;
3902 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3903 			/* All VLAN pruning rules use a VSI list. Check if
3904 			 * there is already a VSI list containing VSI that we
3905 			 * want to add. If found, use the same vsi_list_id for
3906 			 * this new VLAN rule or else create a new list.
3908 			map_info = ice_find_vsi_list_entry(recp_list,
3912 				status = ice_create_vsi_list_rule(hw,
3920 			/* Convert the action to forwarding to a VSI list. */
3921 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3922 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3925 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
/* Re-find the entry just created so its map info can be attached. */
3927 			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3930 				status = ICE_ERR_DOES_NOT_EXIST;
3933 			/* reuse VSI list for new rule and increment ref_cnt */
3935 				v_list_itr->vsi_list_info = map_info;
3936 				map_info->ref_cnt++;
3938 				v_list_itr->vsi_list_info =
3939 					ice_create_vsi_list_map(hw, &vsi_handle,
3943 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3944 		/* Update existing VSI list to add new VSI ID only if it used
3947 		cur_fltr = &v_list_itr->fltr_info;
3948 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3951 		/* If VLAN rule exists and VSI list being used by this rule is
3952 		 * referenced by more than 1 VLAN rule. Then create a new VSI
3953 		 * list appending previous VSI with new VSI and update existing
3954 		 * VLAN rule to point to new VSI list ID
3956 		struct ice_fltr_info tmp_fltr;
3957 		u16 vsi_handle_arr[2];
3960 		/* Current implementation only supports reusing VSI list with
3961 		 * one VSI count. We should never hit below condition
3963 		if (v_list_itr->vsi_count > 1 &&
3964 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
3965 			ice_debug(hw, ICE_DBG_SW,
3966 				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3967 			status = ICE_ERR_CFG;
/* Fetch the one VSI currently on the shared list. */
3972 			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3975 		/* A rule already exists with the new VSI being added */
3976 		if (cur_handle == vsi_handle) {
3977 			status = ICE_ERR_ALREADY_EXISTS;
3981 		vsi_handle_arr[0] = cur_handle;
3982 		vsi_handle_arr[1] = vsi_handle;
3983 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3984 						  &vsi_list_id, lkup_type);
3988 		tmp_fltr = v_list_itr->fltr_info;
3989 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3990 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3991 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3992 		/* Update the previous switch rule to a new VSI list which
3993 		 * includes current VSI that is requested
3995 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3999 		/* before overriding VSI list map info. decrement ref_cnt of
4002 		v_list_itr->vsi_list_info->ref_cnt--;
4004 		/* now update to newly created list */
4005 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4006 		v_list_itr->vsi_list_info =
4007 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4009 		v_list_itr->vsi_count++;
4013 	ice_release_lock(rule_lock);
4018  * ice_add_vlan_rule - Add VLAN based filter rule
4019  * @hw: pointer to the hardware structure
4020  * @v_list: list of VLAN entries and forwarding information
4021  * @sw: pointer to switch info struct for which function add rule
/* Iterates v_list, validating the lookup type and adding each entry via
 * ice_add_vlan_internal; stops at the first per-entry failure.
 */
4023 static enum ice_status
4024 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4025 		  struct ice_switch_info *sw)
4027 	struct ice_fltr_list_entry *v_list_itr;
4028 	struct ice_sw_recipe *recp_list;
4030 	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4031 	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4033 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4034 			return ICE_ERR_PARAM;
4035 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4036 		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4038 		if (v_list_itr->status)
4039 			return v_list_itr->status;
4045  * ice_add_vlan - Add a VLAN based filter rule
4046  * @hw: pointer to the hardware structure
4047  * @v_list: list of VLAN and forwarding information
4049  * Function add VLAN rule for logical port from HW struct
/* Public wrapper around ice_add_vlan_rule using hw->switch_info. */
4051 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4054 		return ICE_ERR_PARAM;
4056 	return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4060  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4061  * @hw: pointer to the hardware structure
4062  * @mv_list: list of MAC and VLAN filters
4063  * @sw: pointer to switch info struct for which function add rule
4064  * @lport: logic port number on which function add rule
4066  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4067  * pruning bits enabled, then it is the responsibility of the caller to make
4068  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4069  * VLAN won't be received on that VSI otherwise.
/* Iterates mv_list and programs each MAC+VLAN entry through the generic
 * per-rule path; stops at the first per-entry failure.
 */
4071 static enum ice_status
4072 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4073 		      struct ice_switch_info *sw, u8 lport)
4075 	struct ice_fltr_list_entry *mv_list_itr;
4076 	struct ice_sw_recipe *recp_list;
4078 	if (!mv_list || !hw)
4079 		return ICE_ERR_PARAM;
4081 	recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4082 	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4084 		enum ice_sw_lkup_type l_type =
4085 			mv_list_itr->fltr_info.lkup_type;
4087 		if (l_type != ICE_SW_LKUP_MAC_VLAN)
4088 			return ICE_ERR_PARAM;
4089 		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4090 		mv_list_itr->status =
4091 			ice_add_rule_internal(hw, recp_list, lport,
4093 		if (mv_list_itr->status)
4094 			return mv_list_itr->status;
4100  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4101  * @hw: pointer to the hardware structure
4102  * @mv_list: list of MAC VLAN addresses and forwarding information
4104  * Function add MAC VLAN rule for logical port from HW struct
/* Public wrapper around ice_add_mac_vlan_rule for hw's logical port. */
4107 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4109 	if (!mv_list || !hw)
4110 		return ICE_ERR_PARAM;
4112 	return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4113 				     hw->port_info->lport);
4117  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4118  * @hw: pointer to the hardware structure
4119  * @em_list: list of ether type MAC filter, MAC is optional
4120  * @sw: pointer to switch info struct for which function add rule
4121  * @lport: logic port number on which function add rule
4123  * This function requires the caller to populate the entries in
4124  * the filter list with the necessary fields (including flags to
4125  * indicate Tx or Rx rules).
/* Each entry must be an ETHERTYPE or ETHERTYPE_MAC lookup; the matching
 * recipe list is selected per entry before the generic add path.
 */
4127 static enum ice_status
4128 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4129 		     struct ice_switch_info *sw, u8 lport)
4131 	struct ice_fltr_list_entry *em_list_itr;
4133 	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4135 		struct ice_sw_recipe *recp_list;
4136 		enum ice_sw_lkup_type l_type;
4138 		l_type = em_list_itr->fltr_info.lkup_type;
4139 		recp_list = &sw->recp_list[l_type];
4141 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4142 		    l_type != ICE_SW_LKUP_ETHERTYPE)
4143 			return ICE_ERR_PARAM;
4145 		em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4148 		if (em_list_itr->status)
4149 			return em_list_itr->status;
4155  * ice_add_eth_mac - Add a ethertype based filter rule
4156  * @hw: pointer to the hardware structure
4157  * @em_list: list of ethertype and forwarding information
4159  * Function add ethertype rule for logical port from HW struct
/* Public wrapper around ice_add_eth_mac_rule for hw's logical port. */
4162 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4164 	if (!em_list || !hw)
4165 		return ICE_ERR_PARAM;
4167 	return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4168 				    hw->port_info->lport);
4172  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4173  * @hw: pointer to the hardware structure
4174  * @em_list: list of ethertype or ethertype MAC entries
4175  * @sw: pointer to switch info struct for which function add rule
/* Safe iteration (entries may be unlinked by callers on success); each
 * entry is removed via the generic remove path for its lookup type.
 */
4177 static enum ice_status
4178 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4179 			struct ice_switch_info *sw)
4181 	struct ice_fltr_list_entry *em_list_itr, *tmp;
4183 	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4185 		struct ice_sw_recipe *recp_list;
4186 		enum ice_sw_lkup_type l_type;
4188 		l_type = em_list_itr->fltr_info.lkup_type;
4190 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4191 		    l_type != ICE_SW_LKUP_ETHERTYPE)
4192 			return ICE_ERR_PARAM;
4194 		recp_list = &sw->recp_list[l_type];
4195 		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4197 		if (em_list_itr->status)
4198 			return em_list_itr->status;
4204  * ice_remove_eth_mac - remove a ethertype based filter rule
4205  * @hw: pointer to the hardware structure
4206  * @em_list: list of ethertype and forwarding information
/* Public wrapper around ice_remove_eth_mac_rule using hw->switch_info. */
4210 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4212 	if (!em_list || !hw)
4213 		return ICE_ERR_PARAM;
4215 	return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4219  * ice_rem_sw_rule_info
4220  * @hw: pointer to the hardware structure
4221  * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every book-keeping entry on rule_head (local state only; no AQ
 * commands are issued here).
 */
4224 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4226 	if (!LIST_EMPTY(rule_head)) {
4227 		struct ice_fltr_mgmt_list_entry *entry;
4228 		struct ice_fltr_mgmt_list_entry *tmp;
4230 		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4231 					 ice_fltr_mgmt_list_entry, list_entry) {
4232 			LIST_DEL(&entry->list_entry);
4233 			ice_free(hw, entry);
4239  * ice_rem_adv_rule_info
4240  * @hw: pointer to the hardware structure
4241  * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every advanced-rule book-keeping entry on rule_head, including
 * each entry's separately allocated lkups array.
 */
4244 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4246 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4247 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4249 	if (LIST_EMPTY(rule_head))
4252 	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4253 				 ice_adv_fltr_mgmt_list_entry, list_entry) {
4254 		LIST_DEL(&lst_itr->list_entry);
/* lkups is owned by the entry; free it before the entry itself. */
4255 		ice_free(hw, lst_itr->lkups);
4256 		ice_free(hw, lst_itr);
4261  * ice_rem_all_sw_rules_info
4262  * @hw: pointer to the hardware structure
/* Clears the book-keeping of every recipe's filter list, dispatching to
 * the basic or advanced cleanup helper depending on the recipe kind.
 */
4264 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4266 	struct ice_switch_info *sw = hw->switch_info;
4269 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4270 		struct LIST_HEAD_TYPE *rule_head;
4272 		rule_head = &sw->recp_list[i].filt_rules;
4273 		if (!sw->recp_list[i].adv_rule)
4274 			ice_rem_sw_rule_info(hw, rule_head);
4276 			ice_rem_adv_rule_info(hw, rule_head);
/* Once an advanced recipe has no rules left, demote its flag. */
4277 		if (sw->recp_list[i].adv_rule &&
4278 		    LIST_EMPTY(&sw->recp_list[i].filt_rules))
4279 			sw->recp_list[i].adv_rule = false;
4284  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4285  * @pi: pointer to the port_info structure
4286  * @vsi_handle: VSI handle to set as default
4287  * @set: true to add the above mentioned switch rule, false to remove it
4288  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4290  * add filter rule to set/unset given VSI as default VSI for the switch
4291  * (represented by swid)
/* Builds an ICE_SW_LKUP_DFLT rule for the given direction and sends an
 * add or remove switch-rules AQ command; on success the port_info default
 * VSI number/rule-ID fields are updated (or reset to invalid on removal).
 */
4294 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4297 	struct ice_aqc_sw_rules_elem *s_rule;
4298 	struct ice_fltr_info f_info;
4299 	struct ice_hw *hw = pi->hw;
4300 	enum ice_adminq_opc opcode;
4301 	enum ice_status status;
4305 	if (!ice_is_vsi_valid(hw, vsi_handle))
4306 		return ICE_ERR_PARAM;
4307 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Removal uses the header-less rule size; add carries the dummy L2 hdr. */
4309 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4310 			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4311 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4313 		return ICE_ERR_NO_MEMORY;
4315 	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4317 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
4318 	f_info.flag = direction;
4319 	f_info.fltr_act = ICE_FWD_TO_VSI;
4320 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4322 	if (f_info.flag & ICE_FLTR_RX) {
/* Rx default rule matches on the logical port as source. */
4323 		f_info.src = pi->lport;
4324 		f_info.src_id = ICE_SRC_ID_LPORT;
4326 			f_info.fltr_rule_id =
4327 				pi->dflt_rx_vsi_rule_id;
4328 	} else if (f_info.flag & ICE_FLTR_TX) {
/* Tx default rule matches on the HW VSI as source. */
4329 		f_info.src_id = ICE_SRC_ID_VSI;
4330 		f_info.src = hw_vsi_id;
4332 			f_info.fltr_rule_id =
4333 				pi->dflt_tx_vsi_rule_id;
4337 		opcode = ice_aqc_opc_add_sw_rules;
4339 		opcode = ice_aqc_opc_remove_sw_rules;
4341 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4343 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4344 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Record the FW-assigned rule index on add ... */
4347 		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4349 		if (f_info.flag & ICE_FLTR_TX) {
4350 			pi->dflt_tx_vsi_num = hw_vsi_id;
4351 			pi->dflt_tx_vsi_rule_id = index;
4352 		} else if (f_info.flag & ICE_FLTR_RX) {
4353 			pi->dflt_rx_vsi_num = hw_vsi_id;
4354 			pi->dflt_rx_vsi_rule_id = index;
/* ... or invalidate the cached default-VSI state on remove. */
4357 		if (f_info.flag & ICE_FLTR_TX) {
4358 			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4359 			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4360 		} else if (f_info.flag & ICE_FLTR_RX) {
4361 			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4362 			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4367 	ice_free(hw, s_rule);
4372  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4373  * @list_head: head of rule list
4374  * @f_info: rule information
4376  * Helper function to search for a unicast rule entry - this is to be used
4377  * to remove unicast MAC filter that is not shared with other VSIs on the
4380  * Returns pointer to entry storing the rule if found
/* Match requires identical lookup data, identical HW VSI ID, and the same
 * direction flag — unlike the generic finder this also keys on the VSI.
 */
4382 static struct ice_fltr_mgmt_list_entry *
4383 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4384 			  struct ice_fltr_info *f_info)
4386 	struct ice_fltr_mgmt_list_entry *list_itr;
4388 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4390 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4391 			    sizeof(f_info->l_data)) &&
4392 		    f_info->fwd_id.hw_vsi_id ==
4393 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
4394 		    f_info->flag == list_itr->fltr_info.flag)
4401  * ice_remove_mac_rule - remove a MAC based filter rule
4402  * @hw: pointer to the hardware structure
4403  * @m_list: list of MAC addresses and forwarding information
4404  * @recp_list: list from which function remove MAC address
4406  * This function removes either a MAC filter rule or a specific VSI from a
4407  * VSI list for a multicast MAC address.
4409  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4410  * ice_add_mac. Caller should be aware that this call will only work if all
4411  * the entries passed into m_list were added previously. It will not attempt to
4412  * do a partial remove of entries that were found.
4414 static enum ice_status
4415 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4416 		    struct ice_sw_recipe *recp_list)
4418 	struct ice_fltr_list_entry *list_itr, *tmp;
4419 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4422 		return ICE_ERR_PARAM;
4424 	rule_lock = &recp_list->filt_rule_lock;
4425 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4427 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4428 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4431 		if (l_type != ICE_SW_LKUP_MAC)
4432 			return ICE_ERR_PARAM;
4434 		vsi_handle = list_itr->fltr_info.vsi_handle;
4435 		if (!ice_is_vsi_valid(hw, vsi_handle))
4436 			return ICE_ERR_PARAM;
4438 		list_itr->fltr_info.fwd_id.hw_vsi_id =
4439 					ice_get_hw_vsi_num(hw, vsi_handle);
4440 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4441 			/* Don't remove the unicast address that belongs to
4442 			 * another VSI on the switch, since it is not being
/* VSI-aware lookup under the rule lock: only remove a unicast rule
 * that this exact VSI owns.
 */
4445 			ice_acquire_lock(rule_lock);
4446 			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4447 						       &list_itr->fltr_info)) {
4448 				ice_release_lock(rule_lock);
4449 				return ICE_ERR_DOES_NOT_EXIST;
4451 			ice_release_lock(rule_lock);
4453 		list_itr->status = ice_remove_rule_internal(hw, recp_list,
4455 		if (list_itr->status)
4456 			return list_itr->status;
4462  * ice_remove_mac - remove a MAC address based filter rule
4463  * @hw: pointer to the hardware structure
4464  * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: removes MAC rules from the ICE_SW_LKUP_MAC recipe. */
4467 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4469 	struct ice_sw_recipe *recp_list;
4471 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4472 	return ice_remove_mac_rule(hw, m_list, recp_list);
4476  * ice_remove_vlan_rule - Remove VLAN based filter rule
4477  * @hw: pointer to the hardware structure
4478  * @v_list: list of VLAN entries and forwarding information
4479  * @recp_list: list from which function remove VLAN
/* Safe iteration; each validated VLAN entry goes through the generic
 * remove path, stopping at the first per-entry failure.
 */
4481 static enum ice_status
4482 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4483 		     struct ice_sw_recipe *recp_list)
4485 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4487 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4489 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4491 		if (l_type != ICE_SW_LKUP_VLAN)
4492 			return ICE_ERR_PARAM;
4493 		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4495 		if (v_list_itr->status)
4496 			return v_list_itr->status;
4502  * ice_remove_vlan - remove a VLAN address based filter rule
4503  * @hw: pointer to the hardware structure
4504  * @v_list: list of VLAN and forwarding information
/* Public wrapper: removes VLAN rules from the ICE_SW_LKUP_VLAN recipe. */
4508 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4510 	struct ice_sw_recipe *recp_list;
4513 		return ICE_ERR_PARAM;
4515 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4516 	return ice_remove_vlan_rule(hw, v_list, recp_list);
4520 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4521 * @hw: pointer to the hardware structure
4522 * @v_list: list of MAC VLAN entries and forwarding information
4523 * @recp_list: list from which function remove MAC VLAN
4525 static enum ice_status
4526 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4527 struct ice_sw_recipe *recp_list)
4529 struct ice_fltr_list_entry *v_list_itr, *tmp;
4531 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4532 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4534 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4536 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4537 return ICE_ERR_PARAM;
4538 v_list_itr->status =
4539 ice_remove_rule_internal(hw, recp_list,
4541 if (v_list_itr->status)
4542 return v_list_itr->status;
4548  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4549  * @hw: pointer to the hardware structure
4550  * @mv_list: list of MAC VLAN and forwarding information
/* Public wrapper: removes entries from the ICE_SW_LKUP_MAC_VLAN recipe. */
4553 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4555 	struct ice_sw_recipe *recp_list;
4557 	if (!mv_list || !hw)
4558 		return ICE_ERR_PARAM;
4560 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4561 	return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4565 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4566 * @fm_entry: filter entry to inspect
4567 * @vsi_handle: VSI handle to compare with filter info
/* Returns true when the filter applies to the given VSI: either it forwards
 * directly to that VSI, or it forwards to a VSI list whose bitmap has the
 * VSI's bit set.
 */
4570 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4572 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4573 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4574 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4575 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4580 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4581 * @hw: pointer to the hardware structure
4582 * @vsi_handle: VSI handle to remove filters from
4583 * @vsi_list_head: pointer to the list to add entry to
4584 * @fi: pointer to fltr_info of filter entry to copy & add
4586 * Helper function, used when creating a list of filters to remove from
4587 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4588 * original filter entry, with the exception of fltr_info.fltr_act and
4589 * fltr_info.fwd_id fields. These are set such that later logic can
4590 * extract which VSI to remove the fltr from, and pass on that information.
4592 static enum ice_status
4593 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4594 struct LIST_HEAD_TYPE *vsi_list_head,
4595 struct ice_fltr_info *fi)
4597 struct ice_fltr_list_entry *tmp;
4599 /* this memory is freed up in the caller function
4600 * once filters for this VSI are removed
4602 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* NOTE(review): the allocation-failure check (if (!tmp)) is not visible in
 * this listing but must guard this return — verify.
 */
4604 return ICE_ERR_NO_MEMORY;
/* Start from a full copy of the caller's filter info */
4606 tmp->fltr_info = *fi;
4608 /* Overwrite these fields to indicate which VSI to remove filter from,
4609 * so find and remove logic can extract the information from the
4610 * list entries. Note that original entries will still have proper
4613 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4614 tmp->fltr_info.vsi_handle = vsi_handle;
4615 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4617 LIST_ADD(&tmp->list_entry, vsi_list_head);
4623 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4624 * @hw: pointer to the hardware structure
4625 * @vsi_handle: VSI handle to remove filters from
4626 * @lkup_list_head: pointer to the list that has certain lookup type filters
4627 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4629 * Locates all filters in lkup_list_head that are used by the given VSI,
4630 * and adds COPIES of those entries to vsi_list_head (intended to be used
4631 * to remove the listed filters).
4632 * Note that this means all entries in vsi_list_head must be explicitly
4633 * deallocated by the caller when done with list.
4635 static enum ice_status
4636 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4637 struct LIST_HEAD_TYPE *lkup_list_head,
4638 struct LIST_HEAD_TYPE *vsi_list_head)
4640 struct ice_fltr_mgmt_list_entry *fm_entry;
4641 enum ice_status status = ICE_SUCCESS;
4643 /* check to make sure VSI ID is valid and within boundary */
4644 if (!ice_is_vsi_valid(hw, vsi_handle))
4645 return ICE_ERR_PARAM;
4647 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4648 ice_fltr_mgmt_list_entry, list_entry) {
4649 struct ice_fltr_info *fi;
4651 fi = &fm_entry->fltr_info;
/* Only copy entries that actually apply to this VSI */
4652 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
/* Append a COPY of this entry; caller owns and frees the copies */
4655 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4664 * ice_determine_promisc_mask
4665 * @fi: filter info to parse
4667 * Helper function to determine which ICE_PROMISC_ mask corresponds
4668 * to given filter into.
4670 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4672 u16 vid = fi->l_data.mac_vlan.vlan_id;
4673 u8 *macaddr = fi->l_data.mac.mac_addr;
4674 bool is_tx_fltr = false;
4675 u8 promisc_mask = 0;
/* Direction selects the TX vs RX flavor of each promisc bit.
 * NOTE(review): the "is_tx_fltr = true;" body of this if is not visible in
 * this listing — verify it was not dropped.
 */
4677 if (fi->flag == ICE_FLTR_TX)
/* Classify by destination MAC: broadcast / multicast / unicast */
4680 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4681 promisc_mask |= is_tx_fltr ?
4682 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4683 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4684 promisc_mask |= is_tx_fltr ?
4685 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4686 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4687 promisc_mask |= is_tx_fltr ?
4688 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* A valid VLAN ID additionally sets the VLAN promisc bit */
4690 promisc_mask |= is_tx_fltr ?
4691 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4693 return promisc_mask;
4697 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4698 * @hw: pointer to the hardware structure
4699 * @vsi_handle: VSI handle to retrieve info from
4700 * @promisc_mask: pointer to mask to be filled in
4701 * @vid: VLAN ID of promisc VLAN VSI
4702 * @sw: pointer to switch info struct for which function add rule
4704 static enum ice_status
4705 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4706 u16 *vid, struct ice_switch_info *sw)
4708 struct ice_fltr_mgmt_list_entry *itr;
4709 struct LIST_HEAD_TYPE *rule_head;
4710 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4712 if (!ice_is_vsi_valid(hw, vsi_handle))
4713 return ICE_ERR_PARAM;
/* Scan the plain-promisc recipe's rule list under its lock */
4717 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4718 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4720 ice_acquire_lock(rule_lock);
4721 LIST_FOR_EACH_ENTRY(itr, rule_head,
4722 ice_fltr_mgmt_list_entry, list_entry) {
4723 /* Continue if this filter doesn't apply to this VSI or the
4724 * VSI ID is not in the VSI map for this filter
4726 if (!ice_vsi_uses_fltr(itr, vsi_handle))
/* Accumulate the promisc bits this filter represents */
4729 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4731 ice_release_lock(rule_lock);
4737 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4738 * @hw: pointer to the hardware structure
4739 * @vsi_handle: VSI handle to retrieve info from
4740 * @promisc_mask: pointer to mask to be filled in
4741 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper over _ice_get_vsi_promisc() using hw's own switch_info */
4744 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4747 return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4748 vid, hw->switch_info);
4752 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4753 * @hw: pointer to the hardware structure
4754 * @vsi_handle: VSI handle to retrieve info from
4755 * @promisc_mask: pointer to mask to be filled in
4756 * @vid: VLAN ID of promisc VLAN VSI
4757 * @sw: pointer to switch info struct for which function add rule
4759 static enum ice_status
4760 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4761 u16 *vid, struct ice_switch_info *sw)
4763 struct ice_fltr_mgmt_list_entry *itr;
4764 struct LIST_HEAD_TYPE *rule_head;
4765 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4767 if (!ice_is_vsi_valid(hw, vsi_handle))
4768 return ICE_ERR_PARAM;
/* Same scan as _ice_get_vsi_promisc(), but over the PROMISC_VLAN recipe */
4772 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4773 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4775 ice_acquire_lock(rule_lock);
4776 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4778 /* Continue if this filter doesn't apply to this VSI or the
4779 * VSI ID is not in the VSI map for this filter
4781 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4784 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4786 ice_release_lock(rule_lock);
4792 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4793 * @hw: pointer to the hardware structure
4794 * @vsi_handle: VSI handle to retrieve info from
4795 * @promisc_mask: pointer to mask to be filled in
4796 * @vid: VLAN ID of promisc VLAN VSI
/* Public wrapper over _ice_get_vsi_vlan_promisc() using hw's switch_info */
4799 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4802 return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
4803 vid, hw->switch_info);
4807 * ice_remove_promisc - Remove promisc based filter rules
4808 * @hw: pointer to the hardware structure
4809 * @recp_id: recipe ID for which the rule needs to removed
4810 * @v_list: list of promisc entries
4812 static enum ice_status
4813 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4814 struct LIST_HEAD_TYPE *v_list)
4816 struct ice_fltr_list_entry *v_list_itr, *tmp;
4817 struct ice_sw_recipe *recp_list;
/* Look up the recipe for the given promisc recipe ID */
4819 recp_list = &hw->switch_info->recp_list[recp_id];
4820 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
/* Per-entry status is recorded; the first failure aborts the walk */
4822 v_list_itr->status =
4823 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4824 if (v_list_itr->status)
4825 return v_list_itr->status;
4831 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
4832 * @hw: pointer to the hardware structure
4833 * @vsi_handle: VSI handle to clear mode
4834 * @promisc_mask: mask of promiscuous config bits to clear
4835 * @vid: VLAN ID to clear VLAN promiscuous
4836 * @sw: pointer to switch info struct for which function add rule
4838 static enum ice_status
4839 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4840 u16 vid, struct ice_switch_info *sw)
4842 struct ice_fltr_list_entry *fm_entry, *tmp;
4843 struct LIST_HEAD_TYPE remove_list_head;
4844 struct ice_fltr_mgmt_list_entry *itr;
4845 struct LIST_HEAD_TYPE *rule_head;
4846 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4847 enum ice_status status = ICE_SUCCESS;
4850 if (!ice_is_vsi_valid(hw, vsi_handle))
4851 return ICE_ERR_PARAM;
/* VLAN bits in the mask select the PROMISC_VLAN recipe */
4853 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4854 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4856 recipe_id = ICE_SW_LKUP_PROMISC;
4858 rule_head = &sw->recp_list[recipe_id].filt_rules;
4859 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4861 INIT_LIST_HEAD(&remove_list_head);
/* Phase 1 (under lock): collect copies of matching rules to remove */
4863 ice_acquire_lock(rule_lock);
4864 LIST_FOR_EACH_ENTRY(itr, rule_head,
4865 ice_fltr_mgmt_list_entry, list_entry) {
4866 struct ice_fltr_info *fltr_info;
4867 u8 fltr_promisc_mask = 0;
4869 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4871 fltr_info = &itr->fltr_info;
/* For VLAN promisc rules, only the requested VLAN ID matches */
4873 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4874 vid != fltr_info->l_data.mac_vlan.vlan_id)
4877 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4879 /* Skip if filter is not completely specified by given mask */
4880 if (fltr_promisc_mask & ~promisc_mask)
4883 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* Drop the lock before bailing so cleanup can run unlocked */
4887 ice_release_lock(rule_lock);
4888 goto free_fltr_list;
4891 ice_release_lock(rule_lock);
/* Phase 2 (no lock): remove the collected rules */
4893 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the temporary copies regardless of removal outcome */
4896 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4897 ice_fltr_list_entry, list_entry) {
4898 LIST_DEL(&fm_entry->list_entry);
4899 ice_free(hw, fm_entry);
4906 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4907 * @hw: pointer to the hardware structure
4908 * @vsi_handle: VSI handle to clear mode
4909 * @promisc_mask: mask of promiscuous config bits to clear
4910 * @vid: VLAN ID to clear VLAN promiscuous
/* Public wrapper over _ice_clear_vsi_promisc() using hw's switch_info */
4913 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
4914 u8 promisc_mask, u16 vid)
4916 return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
4917 vid, hw->switch_info);
4921 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4922 * @hw: pointer to the hardware structure
4923 * @vsi_handle: VSI handle to configure
4924 * @promisc_mask: mask of promiscuous config bits
4925 * @vid: VLAN ID to set VLAN promiscuous
4926 * @lport: logical port number to configure promisc mode
4927 * @sw: pointer to switch info struct for which function add rule
4929 static enum ice_status
4930 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4931 u16 vid, u8 lport, struct ice_switch_info *sw)
/* Local packet-type tags used to pick the destination MAC below */
4933 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4934 struct ice_fltr_list_entry f_list_entry;
4935 struct ice_fltr_info new_fltr;
4936 enum ice_status status = ICE_SUCCESS;
4942 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4944 if (!ice_is_vsi_valid(hw, vsi_handle))
4945 return ICE_ERR_PARAM;
4946 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4948 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
/* VLAN bits in the mask select the PROMISC_VLAN recipe and pin the VID */
4950 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4951 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4952 new_fltr.l_data.mac_vlan.vlan_id = vid;
4953 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4955 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4956 recipe_id = ICE_SW_LKUP_PROMISC;
4959 /* Separate filters must be set for each direction/packet type
4960 * combination, so we will loop over the mask value, store the
4961 * individual type, and clear it out in the input mask as it
4964 while (promisc_mask) {
4965 struct ice_sw_recipe *recp_list;
/* Consume exactly one promisc bit per iteration, highest priority first */
4971 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4972 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4973 pkt_type = UCAST_FLTR;
4974 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4975 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4976 pkt_type = UCAST_FLTR;
4978 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4979 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4980 pkt_type = MCAST_FLTR;
4981 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4982 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4983 pkt_type = MCAST_FLTR;
4985 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4986 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4987 pkt_type = BCAST_FLTR;
4988 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4989 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4990 pkt_type = BCAST_FLTR;
4994 /* Check for VLAN promiscuous flag */
4995 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4996 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4997 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4998 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5002 /* Set filter DA based on packet type */
5003 mac_addr = new_fltr.l_data.mac.mac_addr;
5004 if (pkt_type == BCAST_FLTR) {
5005 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5006 } else if (pkt_type == MCAST_FLTR ||
5007 pkt_type == UCAST_FLTR) {
5008 /* Use the dummy ether header DA */
5009 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5010 ICE_NONDMA_TO_NONDMA)
5011 if (pkt_type == MCAST_FLTR)
5012 mac_addr[0] |= 0x1; /* Set multicast bit */
5015 /* Need to reset this to zero for all iterations */
/* TX rules use the VSI as source; RX rules use the logical port */
5018 new_fltr.flag |= ICE_FLTR_TX;
5019 new_fltr.src = hw_vsi_id;
5021 new_fltr.flag |= ICE_FLTR_RX;
5022 new_fltr.src = lport;
5025 new_fltr.fltr_act = ICE_FWD_TO_VSI;
5026 new_fltr.vsi_handle = vsi_handle;
5027 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5028 f_list_entry.fltr_info = new_fltr;
5029 recp_list = &sw->recp_list[recipe_id];
5031 status = ice_add_rule_internal(hw, recp_list, lport,
5033 if (status != ICE_SUCCESS)
5034 goto set_promisc_exit;
5042 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5043 * @hw: pointer to the hardware structure
5044 * @vsi_handle: VSI handle to configure
5045 * @promisc_mask: mask of promiscuous config bits
5046 * @vid: VLAN ID to set VLAN promiscuous
/* Public wrapper over _ice_set_vsi_promisc() using hw's port and switch_info */
5049 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5052 return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5053 hw->port_info->lport,
5058 * _ice_set_vlan_vsi_promisc
5059 * @hw: pointer to the hardware structure
5060 * @vsi_handle: VSI handle to configure
5061 * @promisc_mask: mask of promiscuous config bits
5062 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5063 * @lport: logical port number to configure promisc mode
5064 * @sw: pointer to switch info struct for which function add rule
5066 * Configure VSI with all associated VLANs to given promiscuous mode(s)
5068 static enum ice_status
5069 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5070 bool rm_vlan_promisc, u8 lport,
5071 struct ice_switch_info *sw)
5073 struct ice_fltr_list_entry *list_itr, *tmp;
5074 struct LIST_HEAD_TYPE vsi_list_head;
5075 struct LIST_HEAD_TYPE *vlan_head;
5076 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5077 enum ice_status status;
/* Collect (under lock) a copy of every VLAN filter used by this VSI */
5080 INIT_LIST_HEAD(&vsi_list_head);
5081 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5082 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5083 ice_acquire_lock(vlan_lock);
5084 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5086 ice_release_lock(vlan_lock);
5088 goto free_fltr_list;
/* Apply (or clear) the promisc mask per collected VLAN ID */
5090 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5092 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5093 if (rm_vlan_promisc)
5094 status = _ice_clear_vsi_promisc(hw, vsi_handle,
5098 status = _ice_set_vsi_promisc(hw, vsi_handle,
5099 promisc_mask, vlan_id,
/* Free the temporary copies made by ice_add_to_vsi_fltr_list() */
5106 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5107 ice_fltr_list_entry, list_entry) {
5108 LIST_DEL(&list_itr->list_entry);
5109 ice_free(hw, list_itr);
5115 * ice_set_vlan_vsi_promisc
5116 * @hw: pointer to the hardware structure
5117 * @vsi_handle: VSI handle to configure
5118 * @promisc_mask: mask of promiscuous config bits
5119 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5121 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Public wrapper over _ice_set_vlan_vsi_promisc() using hw's port/switch_info */
5124 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5125 bool rm_vlan_promisc)
5127 return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5128 rm_vlan_promisc, hw->port_info->lport,
5133 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5134 * @hw: pointer to the hardware structure
5135 * @vsi_handle: VSI handle to remove filters from
5136 * @recp_list: recipe list from which function remove fltr
5137 * @lkup: switch rule filter lookup type
5140 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5141 struct ice_sw_recipe *recp_list,
5142 enum ice_sw_lkup_type lkup)
5144 struct ice_fltr_list_entry *fm_entry;
5145 struct LIST_HEAD_TYPE remove_list_head;
5146 struct LIST_HEAD_TYPE *rule_head;
5147 struct ice_fltr_list_entry *tmp;
5148 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5149 enum ice_status status;
/* Collect (under lock) copies of this VSI's filters of the given type */
5151 INIT_LIST_HEAD(&remove_list_head);
5152 rule_lock = &recp_list[lkup].filt_rule_lock;
5153 rule_head = &recp_list[lkup].filt_rules;
5154 ice_acquire_lock(rule_lock);
5155 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5157 ice_release_lock(rule_lock);
/* Dispatch to the type-specific removal routine */
5162 case ICE_SW_LKUP_MAC:
5163 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5165 case ICE_SW_LKUP_VLAN:
5166 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5168 case ICE_SW_LKUP_PROMISC:
5169 case ICE_SW_LKUP_PROMISC_VLAN:
5170 ice_remove_promisc(hw, lkup, &remove_list_head);
5172 case ICE_SW_LKUP_MAC_VLAN:
5173 ice_remove_mac_vlan(hw, &remove_list_head);
5175 case ICE_SW_LKUP_ETHERTYPE:
5176 case ICE_SW_LKUP_ETHERTYPE_MAC:
5177 ice_remove_eth_mac(hw, &remove_list_head);
5179 case ICE_SW_LKUP_DFLT:
5180 ice_debug(hw, ICE_DBG_SW,
5181 "Remove filters for this lookup type hasn't been implemented yet\n");
5183 case ICE_SW_LKUP_LAST:
5184 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary copies regardless of lookup type */
5188 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5189 ice_fltr_list_entry, list_entry) {
5190 LIST_DEL(&fm_entry->list_entry);
5191 ice_free(hw, fm_entry);
5196 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5197 * @hw: pointer to the hardware structure
5198 * @vsi_handle: VSI handle to remove filters from
5199 * @sw: pointer to switch info struct
5202 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5203 struct ice_switch_info *sw)
5205 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Remove this VSI's filters for every supported lookup type, one by one */
5207 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5208 sw->recp_list, ICE_SW_LKUP_MAC);
5209 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5210 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5211 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5212 sw->recp_list, ICE_SW_LKUP_PROMISC);
5213 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5214 sw->recp_list, ICE_SW_LKUP_VLAN);
5215 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5216 sw->recp_list, ICE_SW_LKUP_DFLT);
5217 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5218 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5219 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5220 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5221 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5222 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5226 * ice_remove_vsi_fltr - Remove all filters for a VSI
5227 * @hw: pointer to the hardware structure
5228 * @vsi_handle: VSI handle to remove filters from
/* Public wrapper: removes all filters of a VSI via hw's own switch_info */
5230 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5232 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5236 * ice_alloc_res_cntr - allocating resource counter
5237 * @hw: pointer to the hardware structure
5238 * @type: type of resource
5239 * @alloc_shared: if set it is shared else dedicated
5240 * @num_items: number of entries requested for FD resource type
5241 * @counter_id: counter index returned by AQ call
5244 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5247 struct ice_aqc_alloc_free_res_elem *buf;
5248 enum ice_status status;
5251 /* Allocate resource */
5252 buf_len = ice_struct_size(buf, elem, 1);
5253 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5255 return ICE_ERR_NO_MEMORY;
/* Encode resource type plus the shared/dedicated flag for the AQ command */
5257 buf->num_elems = CPU_TO_LE16(num_items);
5258 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5259 ICE_AQC_RES_TYPE_M) | alloc_shared);
5261 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5262 ice_aqc_opc_alloc_res, NULL);
/* Hand the firmware-assigned counter index back to the caller */
5266 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5274 * ice_free_res_cntr - free resource counter
5275 * @hw: pointer to the hardware structure
5276 * @type: type of resource
5277 * @alloc_shared: if set it is shared else dedicated
5278 * @num_items: number of entries to be freed for FD resource type
5279 * @counter_id: counter ID resource which needs to be freed
5282 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5285 struct ice_aqc_alloc_free_res_elem *buf;
5286 enum ice_status status;
5290 buf_len = ice_struct_size(buf, elem, 1);
5291 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5293 return ICE_ERR_NO_MEMORY;
/* Same encoding as ice_alloc_res_cntr(), plus the counter to free */
5295 buf->num_elems = CPU_TO_LE16(num_items);
5296 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5297 ICE_AQC_RES_TYPE_M) | alloc_shared);
5298 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5300 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5301 ice_aqc_opc_free_res, NULL);
/* Failure is logged but not fatal to the caller beyond the status code */
5303 ice_debug(hw, ICE_DBG_SW,
5304 "counter resource could not be freed\n");
5311 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5312 * @hw: pointer to the hardware structure
5313 * @counter_id: returns counter index
/* Convenience wrapper: allocate one dedicated VLAN counter resource */
5315 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5317 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5318 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5323 * ice_free_vlan_res_counter - Free counter resource for VLAN type
5324 * @hw: pointer to the hardware structure
5325 * @counter_id: counter index to be freed
/* Convenience wrapper: free one dedicated VLAN counter resource */
5327 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5329 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5330 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5335 * ice_alloc_res_lg_act - add large action resource
5336 * @hw: pointer to the hardware structure
5337 * @l_id: large action ID to fill it in
5338 * @num_acts: number of actions to hold with a large action entry
5340 static enum ice_status
5341 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5343 struct ice_aqc_alloc_free_res_elem *sw_buf;
5344 enum ice_status status;
/* Reject zero or more than ICE_MAX_LG_ACT actions up front */
5347 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5348 return ICE_ERR_PARAM;
5350 /* Allocate resource for large action */
5351 buf_len = ice_struct_size(sw_buf, elem, 1);
5352 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5354 return ICE_ERR_NO_MEMORY;
5356 sw_buf->num_elems = CPU_TO_LE16(1);
5358 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5359 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
5360 * If num_acts is greater than 2, then use
5361 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5362 * The num_acts cannot exceed 4. This was ensured at the
5363 * beginning of the function.
5366 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5367 else if (num_acts == 2)
5368 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5370 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5372 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5373 ice_aqc_opc_alloc_res, NULL);
/* On success, return the firmware-assigned large-action index */
5375 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5377 ice_free(hw, sw_buf);
5382 * ice_add_mac_with_sw_marker - add filter with sw marker
5383 * @hw: pointer to the hardware structure
5384 * @f_info: filter info structure containing the MAC filter information
5385 * @sw_marker: sw marker to tag the Rx descriptor with
5388 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5391 struct ice_fltr_mgmt_list_entry *m_entry;
5392 struct ice_fltr_list_entry fl_info;
5393 struct ice_sw_recipe *recp_list;
5394 struct LIST_HEAD_TYPE l_head;
5395 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5396 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker/VSI are accepted */
5400 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5401 return ICE_ERR_PARAM;
5403 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5404 return ICE_ERR_PARAM;
5406 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5407 return ICE_ERR_PARAM;
5409 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5410 return ICE_ERR_PARAM;
5411 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5413 /* Add filter if it doesn't exist so then the adding of large
5414 * action always results in update
5417 INIT_LIST_HEAD(&l_head);
5418 fl_info.fltr_info = *f_info;
5419 LIST_ADD(&fl_info.list_entry, &l_head);
5421 entry_exists = false;
5422 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5423 hw->port_info->lport);
/* ALREADY_EXISTS is fine here; remember it so we don't remove on error */
5424 if (ret == ICE_ERR_ALREADY_EXISTS)
5425 entry_exists = true;
5429 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5430 rule_lock = &recp_list->filt_rule_lock;
5431 ice_acquire_lock(rule_lock);
5432 /* Get the book keeping entry for the filter */
5433 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5437 /* If counter action was enabled for this rule then don't enable
5438 * sw marker large action
5440 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5441 ret = ICE_ERR_PARAM;
5445 /* if same marker was added before */
5446 if (m_entry->sw_marker_id == sw_marker) {
5447 ret = ICE_ERR_ALREADY_EXISTS;
5451 /* Allocate a hardware table entry to hold large act. Three actions
5452 * for marker based large action
5454 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5458 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5461 /* Update the switch rule to add the marker action */
5462 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5464 ice_release_lock(rule_lock);
5469 ice_release_lock(rule_lock);
5470 /* only remove entry if it did not exist previously */
5472 ret = ice_remove_mac(hw, &l_head);
5478 * ice_add_mac_with_counter - add filter with counter enabled
5479 * @hw: pointer to the hardware structure
5480 * @f_info: pointer to filter info structure containing the MAC filter
5484 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5486 struct ice_fltr_mgmt_list_entry *m_entry;
5487 struct ice_fltr_list_entry fl_info;
5488 struct ice_sw_recipe *recp_list;
5489 struct LIST_HEAD_TYPE l_head;
5490 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5491 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid VSI are accepted */
5496 if (f_info->fltr_act != ICE_FWD_TO_VSI)
5497 return ICE_ERR_PARAM;
5499 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5500 return ICE_ERR_PARAM;
5502 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5503 return ICE_ERR_PARAM;
5504 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5505 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5507 entry_exist = false;
5509 rule_lock = &recp_list->filt_rule_lock;
5511 /* Add filter if it doesn't exist so then the adding of large
5512 * action always results in update
5514 INIT_LIST_HEAD(&l_head);
5516 fl_info.fltr_info = *f_info;
5517 LIST_ADD(&fl_info.list_entry, &l_head);
5519 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5520 hw->port_info->lport);
/* ALREADY_EXISTS is fine; remember it so we don't remove on error */
5521 if (ret == ICE_ERR_ALREADY_EXISTS)
5526 ice_acquire_lock(rule_lock);
5527 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5529 ret = ICE_ERR_BAD_PTR;
5533 /* Don't enable counter for a filter for which sw marker was enabled */
5534 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5535 ret = ICE_ERR_PARAM;
5539 /* If a counter was already enabled then don't need to add again */
5540 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5541 ret = ICE_ERR_ALREADY_EXISTS;
5545 /* Allocate a hardware table entry to VLAN counter */
5546 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5550 /* Allocate a hardware table entry to hold large act. Two actions for
5551 * counter based large action
5553 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5557 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5560 /* Update the switch rule to add the counter action */
5561 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5563 ice_release_lock(rule_lock);
5568 ice_release_lock(rule_lock);
5569 /* only remove entry if it did not exist previously */
5571 ret = ice_remove_mac(hw, &l_head);
5576 /* This is mapping table entry that maps every word within a given protocol
5577 * structure to the real byte offset as per the specification of that
5579 * for example dst address is 3 words in ethertype header and corresponding
5580 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
5581 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
5582 * matching entry describing its field. This needs to be updated if new
5583 * structure is added to that union.
/* Per-protocol table of 16-bit word offsets (in bytes) into each header;
 * indexed by ice_protocol_type. Tunnel entries (VXLAN/GENEVE/GTP/...) start
 * at offset 8 because their extraction skips the leading UDP header words.
 */
5585 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
5586 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
5587 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
5588 { ICE_ETYPE_OL, { 0 } },
5589 { ICE_VLAN_OFOS, { 0, 2 } },
5590 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5591 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
5592 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5593 26, 28, 30, 32, 34, 36, 38 } },
5594 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
5595 26, 28, 30, 32, 34, 36, 38 } },
5596 { ICE_TCP_IL, { 0, 2 } },
5597 { ICE_UDP_OF, { 0, 2 } },
5598 { ICE_UDP_ILOS, { 0, 2 } },
5599 { ICE_SCTP_IL, { 0, 2 } },
5600 { ICE_VXLAN, { 8, 10, 12, 14 } },
5601 { ICE_GENEVE, { 8, 10, 12, 14 } },
5602 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
5603 { ICE_NVGRE, { 0, 2, 4, 6 } },
5604 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
5605 { ICE_PPPOE, { 0, 2, 4, 6 } },
5606 { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
5607 { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
5608 { ICE_ESP, { 0, 2, 4, 6 } },
5609 { ICE_AH, { 0, 2, 4, 6, 8, 10 } },
5610 { ICE_NAT_T, { 8, 10, 12, 14 } },
5613 /* The following table describes preferred grouping of recipes.
5614 * If a recipe that needs to be programmed is a superset or matches one of the
5615 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used when
 * programming recipes. Several tunnel types share the carrying L4 header's
 * HW ID (e.g. VXLAN/GENEVE/GTP -> ICE_UDP_OF_HW).
 */
5619 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
5620 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
5621 { ICE_MAC_IL, ICE_MAC_IL_HW },
5622 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
5623 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
5624 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
5625 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
5626 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
5627 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
5628 { ICE_TCP_IL, ICE_TCP_IL_HW },
5629 { ICE_UDP_OF, ICE_UDP_OF_HW },
5630 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
5631 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
5632 { ICE_VXLAN, ICE_UDP_OF_HW },
5633 { ICE_GENEVE, ICE_UDP_OF_HW },
5634 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
5635 { ICE_NVGRE, ICE_GRE_OF_HW },
5636 { ICE_GTP, ICE_UDP_OF_HW },
5637 { ICE_PPPOE, ICE_PPPOE_HW },
5638 { ICE_PFCP, ICE_UDP_ILOS_HW },
5639 { ICE_L2TPV3, ICE_L2TPV3_HW },
5640 { ICE_ESP, ICE_ESP_HW },
5641 { ICE_AH, ICE_AH_HW },
5642 { ICE_NAT_T, ICE_UDP_ILOS_HW },
5646 * ice_find_recp - find a recipe
5647 * @hw: pointer to the hardware structure
5648 * @lkup_exts: extension sequence to match
5650 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5652 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5653 enum ice_sw_tunnel_type tun_type)
5655 bool refresh_required = true;
5656 struct ice_sw_recipe *recp;
5659 /* Walk through existing recipes to find a match */
5660 recp = hw->switch_info->recp_list;
5661 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5662 /* If recipe was not created for this ID, in SW bookkeeping,
5663 * check if FW has an entry for this recipe. If the FW has an
5664 * entry update it in our SW bookkeeping and continue with the
5667 if (!recp[i].recp_created)
5668 if (ice_get_recp_frm_fw(hw,
5669 hw->switch_info->recp_list, i,
5673 /* Skip inverse action recipes */
5674 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5675 ICE_AQ_RECIPE_ACT_INV_ACT)
5678 /* if number of words we are looking for match */
5679 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5680 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5681 struct ice_fv_word *be = lkup_exts->fv_words;
5682 u16 *cr = recp[i].lkup_exts.field_mask;
5683 u16 *de = lkup_exts->field_mask;
5687 /* ar, cr, and qr are related to the recipe words, while
5688 * be, de, and pe are related to the lookup words
5690 for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
/* For each lookup word, search the recipe's words for an
 * identical (offset, protocol ID, mask) triple.
 */
5691 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5693 if (ar[qr].off == be[pe].off &&
5694 ar[qr].prot_id == be[pe].prot_id &&
5696 /* Found the "pe"th word in the
5701 /* After walking through all the words in the
5702 * "i"th recipe if "p"th word was not found then
5703 * this recipe is not what we are looking for.
5704 * So break out from this loop and try the next
5707 if (qr >= recp[i].lkup_exts.n_val_words) {
5712 /* If for "i"th recipe the found was never set to false
5713 * then it means we found our match
5715 if (tun_type == recp[i].tun_type && found)
5716 return i; /* Return the recipe ID */
/* No recipe matched all lookup words and the tunnel type */
5719 return ICE_MAX_NUM_RECIPES;
5723 * ice_prot_type_to_id - get protocol ID from protocol type
5724 * @type: protocol type
5725 * @id: pointer to variable that will receive the ID
5727 * Returns true if found, false otherwise
5729 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
 /* Linear scan of the SW-to-HW protocol translation table; on a hit the
  * HW protocol ID is stored through @id.
  */
5733 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5734 if (ice_prot_id_tbl[i].type == type) {
5735 *id = ice_prot_id_tbl[i].protocol_id;
5742 * ice_fill_valid_words - count valid words
5743 * @rule: advanced rule with lookup information
5744 * @lkup_exts: byte offset extractions of the words that are valid
5746 * calculate valid words in a lookup rule using mask value
 *
 * Returns the number of words appended to @lkup_exts for this rule
 * (0 when the rule's protocol type cannot be translated to a HW ID).
5749 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5750 struct ice_prot_lkup_ext *lkup_exts)
5752 u8 j, word, prot_id, ret_val;
5754 if (!ice_prot_type_to_id(rule->type, &prot_id))
 /* continue from the words recorded by earlier rules */
5757 word = lkup_exts->n_val_words;
 /* scan the match mask 16 bits at a time; each non-zero u16 is one
  * extraction word
  */
5759 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5760 if (((u16 *)&rule->m_u)[j] &&
5761 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5762 /* No more space to accommodate */
5763 if (word >= ICE_MAX_CHAIN_WORDS)
5765 lkup_exts->fv_words[word].off =
5766 ice_prot_ext[rule->type].offs[j];
5767 lkup_exts->fv_words[word].prot_id =
5768 ice_prot_id_tbl[rule->type].protocol_id;
5769 lkup_exts->field_mask[word] =
5770 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
5774 ret_val = word - lkup_exts->n_val_words;
5775 lkup_exts->n_val_words = word;
5781 * ice_create_first_fit_recp_def - Create a recipe grouping
5782 * @hw: pointer to the hardware structure
5783 * @lkup_exts: an array of protocol header extractions
5784 * @rg_list: pointer to a list that stores new recipe groups
5785 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5787 * Using first fit algorithm, take all the words that are still not done
5788 * and start grouping them in 4-word groups. Each group makes up one
 *
 * Entries added to @rg_list are heap-allocated; ownership passes to the
 * caller. Returns ICE_ERR_NO_MEMORY if an entry cannot be allocated.
5791 static enum ice_status
5792 ice_create_first_fit_recp_def(struct ice_hw *hw,
5793 struct ice_prot_lkup_ext *lkup_exts,
5794 struct LIST_HEAD_TYPE *rg_list,
5797 struct ice_pref_recipe_group *grp = NULL;
 /* a rule with no valid words still gets one (empty) group */
5802 if (!lkup_exts->n_val_words) {
5803 struct ice_recp_grp_entry *entry;
5805 entry = (struct ice_recp_grp_entry *)
5806 ice_malloc(hw, sizeof(*entry));
5808 return ICE_ERR_NO_MEMORY;
5809 LIST_ADD(&entry->l_entry, rg_list);
5810 grp = &entry->r_group;
5812 grp->n_val_pairs = 0;
5815 /* Walk through every word in the rule to check if it is not done. If so
5816 * then this word needs to be part of a new recipe.
5818 for (j = 0; j < lkup_exts->n_val_words; j++)
5819 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* open a new group when none exists yet or the current
  * group already holds ICE_NUM_WORDS_RECIPE pairs
  */
5821 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5822 struct ice_recp_grp_entry *entry;
5824 entry = (struct ice_recp_grp_entry *)
5825 ice_malloc(hw, sizeof(*entry));
5827 return ICE_ERR_NO_MEMORY;
5828 LIST_ADD(&entry->l_entry, rg_list);
5829 grp = &entry->r_group;
5833 grp->pairs[grp->n_val_pairs].prot_id =
5834 lkup_exts->fv_words[j].prot_id;
5835 grp->pairs[grp->n_val_pairs].off =
5836 lkup_exts->fv_words[j].off;
5837 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5845 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5846 * @hw: pointer to the hardware structure
5847 * @fv_list: field vector with the extraction sequence information
5848 * @rg_list: recipe groupings with protocol-offset pairs
5850 * Helper function to fill in the field vector indices for protocol-offset
5851 * pairs. These indexes are then ultimately programmed into a recipe.
 *
 * Only the FIRST field vector in @fv_list is consulted; every pair in
 * @rg_list must resolve against it or ICE_ERR_PARAM is returned.
5853 static enum ice_status
5854 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5855 struct LIST_HEAD_TYPE *rg_list)
5857 struct ice_sw_fv_list_entry *fv;
5858 struct ice_recp_grp_entry *rg;
5859 struct ice_fv_word *fv_ext;
5861 if (LIST_EMPTY(fv_list))
5864 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5865 fv_ext = fv->fv_ptr->ew;
5867 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5870 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5871 struct ice_fv_word *pr;
5876 pr = &rg->r_group.pairs[i];
5877 mask = rg->r_group.mask[i];
 /* search the extraction words of the FV for this
  * (prot_id, off) pair
  */
5879 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5880 if (fv_ext[j].prot_id == pr->prot_id &&
5881 fv_ext[j].off == pr->off) {
5884 /* Store index of field vector */
5886 rg->fv_mask[i] = mask;
5890 /* Protocol/offset could not be found, caller gave an
5894 return ICE_ERR_PARAM;
5902 * ice_find_free_recp_res_idx - find free result indexes for recipe
5903 * @hw: pointer to hardware structure
5904 * @profiles: bitmap of profiles that will be associated with the new recipe
5905 * @free_idx: pointer to variable to receive the free index bitmap
5907 * The algorithm used here is:
5908 * 1. When creating a new recipe, create a set P which contains all
5909 * Profiles that will be associated with our new recipe
5911 * 2. For each Profile p in set P:
5912 * a. Add all recipes associated with Profile p into set R
5913 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5914 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5915 * i. Or just assume they all have the same possible indexes:
5917 * i.e., PossibleIndexes = 0x0000F00000000000
5919 * 3. For each Recipe r in set R:
5920 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5921 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5923 * FreeIndexes will contain the bits indicating the indexes free for use,
5924 * then the code needs to update the recipe[r].used_result_idx_bits to
5925 * indicate which indexes were selected for use by this recipe.
 *
 * Returns the number of free result indexes (popcount of @free_idx).
5928 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5929 ice_bitmap_t *free_idx)
5931 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5932 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5933 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5936 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5937 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5938 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5939 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 /* start from "all indexes possible"; each profile below ANDs in its
  * own possible-index set
  */
5941 ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
5943 /* For each profile we are going to associate the recipe with, add the
5944 * recipes that are associated with that profile. This will give us
5945 * the set of recipes that our recipe may collide with. Also, determine
5946 * what possible result indexes are usable given this set of profiles.
5948 ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
5949 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5950 ICE_MAX_NUM_RECIPES);
5951 ice_and_bitmap(possible_idx, possible_idx,
5952 hw->switch_info->prof_res_bm[bit],
5956 /* For each recipe that our new recipe may collide with, determine
5957 * which indexes have been used.
5959 ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
5960 ice_or_bitmap(used_idx, used_idx,
5961 hw->switch_info->recp_list[bit].res_idxs,
 /* free = possible XOR used (used is a subset of possible) */
5964 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5966 /* return number of free indexes */
5967 return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
5971 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5972 * @hw: pointer to hardware structure
5973 * @rm: recipe management list entry
5974 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe IDs, builds one ice_aqc_recipe_data_elem per recipe
 * group in @rm->rg_list (plus, for multi-group recipes, one extra "root"
 * recipe that chains the others via their result indexes), programs them
 * through ice_aq_add_recipe() under the change lock, and finally mirrors
 * the programmed state into hw->switch_info->recp_list.
5976 static enum ice_status
5977 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5978 ice_bitmap_t *profiles)
5980 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5981 struct ice_aqc_recipe_data_elem *tmp;
5982 struct ice_aqc_recipe_data_elem *buf;
5983 struct ice_recp_grp_entry *entry;
5984 enum ice_status status;
5990 /* When more than one recipe are required, another recipe is needed to
5991 * chain them together. Matching a tunnel metadata ID takes up one of
5992 * the match fields in the chaining recipe reducing the number of
5993 * chained recipes by one.
5995 /* check number of free result indices */
5996 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5997 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm)
5999 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6000 free_res_idx, rm->n_grp_count);
6002 if (rm->n_grp_count > 1) {
6003 if (rm->n_grp_count > free_res_idx)
6004 return ICE_ERR_MAX_LIMIT;
6009 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6010 return ICE_ERR_MAX_LIMIT;
6012 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6013 ICE_MAX_NUM_RECIPES,
6016 return ICE_ERR_NO_MEMORY;
6018 buf = (struct ice_aqc_recipe_data_elem *)
6019 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6021 status = ICE_ERR_NO_MEMORY;
 /* read existing recipes from FW; tmp[0] is later used as a template */
6025 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6026 recipe_count = ICE_MAX_NUM_RECIPES;
6027 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6029 if (status || recipe_count == 0)
6032 /* Allocate the recipe resources, and configure them according to the
6033 * match fields from protocol headers and extracted field vectors.
6035 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6036 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6039 status = ice_alloc_recipe(hw, &entry->rid);
6043 /* Clear the result index of the located recipe, as this will be
6044 * updated, if needed, later in the recipe creation process.
6046 tmp[0].content.result_indx = 0;
6048 buf[recps] = tmp[0];
6049 buf[recps].recipe_indx = (u8)entry->rid;
6050 /* if the recipe is a non-root recipe RID should be programmed
6051 * as 0 for the rules to be applied correctly.
6053 buf[recps].content.rid = 0;
6054 ice_memset(&buf[recps].content.lkup_indx, 0,
6055 sizeof(buf[recps].content.lkup_indx),
6058 /* All recipes use look-up index 0 to match switch ID. */
6059 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6060 buf[recps].content.mask[0] =
6061 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6062 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6065 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6066 buf[recps].content.lkup_indx[i] = 0x80;
6067 buf[recps].content.mask[i] = 0;
 /* now overwrite indexes 1..n with this group's real FV indexes
  * and masks
  */
6070 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6071 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6072 buf[recps].content.mask[i + 1] =
6073 CPU_TO_LE16(entry->fv_mask[i]);
6076 if (rm->n_grp_count > 1) {
6077 /* Checks to see if there really is a valid result index
6080 if (chain_idx >= ICE_MAX_FV_WORDS) {
6081 ice_debug(hw, ICE_DBG_SW,
6082 "No chain index available\n");
6083 status = ICE_ERR_MAX_LIMIT;
 /* each chained (non-root) recipe publishes its hit in a
  * result word that the root recipe later matches on
  */
6087 entry->chain_idx = chain_idx;
6088 buf[recps].content.result_indx =
6089 ICE_AQ_RECIPE_RESULT_EN |
6090 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6091 ICE_AQ_RECIPE_RESULT_DATA_M);
6092 ice_clear_bit(chain_idx, result_idx_bm);
6093 chain_idx = ice_find_first_bit(result_idx_bm,
6097 /* fill recipe dependencies */
6098 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6099 ICE_MAX_NUM_RECIPES);
6100 ice_set_bit(buf[recps].recipe_indx,
6101 (ice_bitmap_t *)buf[recps].recipe_bitmap);
6102 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
 /* single-group recipe: the one recipe is itself the root */
6106 if (rm->n_grp_count == 1) {
6107 rm->root_rid = buf[0].recipe_indx;
6108 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6109 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6110 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6111 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6112 sizeof(buf[0].recipe_bitmap),
6113 ICE_NONDMA_TO_NONDMA);
6115 status = ICE_ERR_BAD_PTR;
6118 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
6119 * the recipe which is getting created if specified
6120 * by user. Usually any advanced switch filter, which results
6121 * into new extraction sequence, ended up creating a new recipe
6122 * of type ROOT and usually recipes are associated with profiles
6123 * Switch rule referreing newly created recipe, needs to have
6124 * either/or 'fwd' or 'join' priority, otherwise switch rule
6125 * evaluation will not happen correctly. In other words, if
6126 * switch rule to be evaluated on priority basis, then recipe
6127 * needs to have priority, otherwise it will be evaluated last.
6129 buf[0].content.act_ctrl_fwd_priority = rm->priority;
6131 struct ice_recp_grp_entry *last_chain_entry;
6134 /* Allocate the last recipe that will chain the outcomes of the
6135 * other recipes together
6137 status = ice_alloc_recipe(hw, &rid);
6141 buf[recps].recipe_indx = (u8)rid;
6142 buf[recps].content.rid = (u8)rid;
6143 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6144 /* the new entry created should also be part of rg_list to
6145 * make sure we have complete recipe
6147 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6148 sizeof(*last_chain_entry));
6149 if (!last_chain_entry) {
6150 status = ICE_ERR_NO_MEMORY;
6153 last_chain_entry->rid = rid;
6154 ice_memset(&buf[recps].content.lkup_indx, 0,
6155 sizeof(buf[recps].content.lkup_indx),
6157 /* All recipes use look-up index 0 to match switch ID. */
6158 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6159 buf[recps].content.mask[0] =
6160 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6161 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6162 buf[recps].content.lkup_indx[i] =
6163 ICE_AQ_RECIPE_LKUP_IGNORE;
6164 buf[recps].content.mask[i] = 0;
6168 /* update r_bitmap with the recp that is used for chaining */
6169 ice_set_bit(rid, rm->r_bitmap);
6170 /* this is the recipe that chains all the other recipes so it
6171 * should not have a chaining ID to indicate the same
6173 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
 /* root recipe matches on each chained recipe's result word */
6174 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6176 last_chain_entry->fv_idx[i] = entry->chain_idx;
6177 buf[recps].content.lkup_indx[i] = entry->chain_idx;
6178 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6179 ice_set_bit(entry->rid, rm->r_bitmap);
6181 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6182 if (sizeof(buf[recps].recipe_bitmap) >=
6183 sizeof(rm->r_bitmap)) {
6184 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6185 sizeof(buf[recps].recipe_bitmap),
6186 ICE_NONDMA_TO_NONDMA);
6188 status = ICE_ERR_BAD_PTR;
6191 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6194 rm->root_rid = (u8)rid;
 /* program the recipes into HW under the global change lock */
6196 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6200 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6201 ice_release_change_lock(hw);
6205 /* Every recipe that just got created add it to the recipe
6208 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6209 struct ice_switch_info *sw = hw->switch_info;
6210 bool is_root, idx_found = false;
6211 struct ice_sw_recipe *recp;
6212 u16 idx, buf_idx = 0;
6214 /* find buffer index for copying some data */
6215 for (idx = 0; idx < rm->n_grp_count; idx++)
6216 if (buf[idx].recipe_indx == entry->rid) {
6222 status = ICE_ERR_OUT_OF_RANGE;
6226 recp = &sw->recp_list[entry->rid];
6227 is_root = (rm->root_rid == entry->rid);
6228 recp->is_root = is_root;
6230 recp->root_rid = entry->rid;
6231 recp->big_recp = (is_root && rm->n_grp_count > 1);
6233 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6234 entry->r_group.n_val_pairs *
6235 sizeof(struct ice_fv_word),
6236 ICE_NONDMA_TO_NONDMA);
6238 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6239 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6241 /* Copy non-result fv index values and masks to recipe. This
6242 * call will also update the result recipe bitmask.
6244 ice_collect_result_idx(&buf[buf_idx], recp);
6246 /* for non-root recipes, also copy to the root, this allows
6247 * easier matching of a complete chained recipe
6250 ice_collect_result_idx(&buf[buf_idx],
6251 &sw->recp_list[rm->root_rid]);
6253 recp->n_ext_words = entry->r_group.n_val_pairs;
6254 recp->chain_idx = entry->chain_idx;
6255 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6256 recp->n_grp_count = rm->n_grp_count;
6257 recp->tun_type = rm->tun_type;
6258 recp->recp_created = true;
6272 * ice_create_recipe_group - creates recipe group
6273 * @hw: pointer to hardware structure
6274 * @rm: recipe management list entry
6275 * @lkup_exts: lookup elements
 *
 * Packs the not-yet-done lookup words into recipe groups (first-fit) on
 * @rm->rg_list and copies the extraction words/masks into @rm. Returns
 * the status of ice_create_first_fit_recp_def().
6277 static enum ice_status
6278 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6279 struct ice_prot_lkup_ext *lkup_exts)
6281 enum ice_status status;
6284 rm->n_grp_count = 0;
6286 /* Create recipes for words that are marked not done by packing them
6289 status = ice_create_first_fit_recp_def(hw, lkup_exts,
6290 &rm->rg_list, &recp_count);
6292 rm->n_grp_count += recp_count;
6293 rm->n_ext_words = lkup_exts->n_val_words;
6294 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6295 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6296 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6297 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6304 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6305 * @hw: pointer to hardware structure
6306 * @lkups: lookup elements or match criteria for the advanced recipe, one
6307 * structure per protocol header
6308 * @lkups_cnt: number of protocols
6309 * @bm: bitmap of field vectors to consider
6310 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Translates each lookup's protocol type to a HW protocol ID, then asks
 * ice_get_sw_fv_list() for the field vectors (restricted to @bm) that
 * contain all of those protocols. The temporary prot_ids array is freed
 * before returning on every path.
6312 static enum ice_status
6313 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6314 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6316 enum ice_status status;
6323 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6325 return ICE_ERR_NO_MEMORY;
6327 for (i = 0; i < lkups_cnt; i++)
6328 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6329 status = ICE_ERR_CFG;
6333 /* Find field vectors that include all specified protocol types */
6334 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6337 ice_free(hw, prot_ids);
6342 * ice_tun_type_match_word - determine if tun type needs a match mask
6343 * @tun_type: tunnel type
6344 * @mask: mask to be used for the tunnel
 *
 * Returns true and sets @mask for tunnel types that must match the tunnel
 * flag in the packet metadata; for the *_VLAN variants the VLAN bit is
 * removed from the mask.
6346 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6349 case ICE_SW_TUN_VXLAN_GPE:
6350 case ICE_SW_TUN_GENEVE:
6351 case ICE_SW_TUN_VXLAN:
6352 case ICE_SW_TUN_NVGRE:
6353 case ICE_SW_TUN_UDP:
6354 case ICE_ALL_TUNNELS:
6355 *mask = ICE_TUN_FLAG_MASK;
6358 case ICE_SW_TUN_GENEVE_VLAN:
6359 case ICE_SW_TUN_VXLAN_VLAN:
6360 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6370 * ice_add_special_words - Add words that are not protocols, such as metadata
6371 * @rinfo: other information regarding the rule e.g. priority and action info
6372 * @lkup_exts: lookup word structure
 *
 * Appends a metadata lookup word (protocol ICE_META_DATA_ID_HW at
 * ICE_TUN_FLAG_MDID_OFF) when the rule's tunnel type requires matching
 * the tunnel flag. Returns ICE_ERR_MAX_LIMIT if no word slot is left.
6374 static enum ice_status
6375 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6376 struct ice_prot_lkup_ext *lkup_exts)
6380 /* If this is a tunneled packet, then add recipe index to match the
6381 * tunnel bit in the packet metadata flags.
6383 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6384 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6385 u8 word = lkup_exts->n_val_words++;
6387 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6388 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6389 lkup_exts->field_mask[word] = mask;
6391 return ICE_ERR_MAX_LIMIT;
6398 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6399 * @hw: pointer to hardware structure
6400 * @rinfo: other information regarding the rule e.g. priority and action info
6401 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * The ICE_SW_TUN_PROFID_* / fixed-profile cases set the exact profile
 * bit(s) directly in @bm; all other tunnel types select a profile class
 * (prof_type) that ice_get_sw_fv_bitmap() resolves into @bm.
6404 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6407 enum ice_prof_type prof_type;
6409 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6411 switch (rinfo->tun_type) {
6413 prof_type = ICE_PROF_NON_TUN;
6415 case ICE_ALL_TUNNELS:
6416 prof_type = ICE_PROF_TUN_ALL;
6418 case ICE_SW_TUN_VXLAN_GPE:
6419 case ICE_SW_TUN_GENEVE:
6420 case ICE_SW_TUN_GENEVE_VLAN:
6421 case ICE_SW_TUN_VXLAN:
6422 case ICE_SW_TUN_VXLAN_VLAN:
6423 case ICE_SW_TUN_UDP:
6424 case ICE_SW_TUN_GTP:
6425 prof_type = ICE_PROF_TUN_UDP;
6427 case ICE_SW_TUN_NVGRE:
6428 prof_type = ICE_PROF_TUN_GRE;
6430 case ICE_SW_TUN_PPPOE:
6431 prof_type = ICE_PROF_TUN_PPPOE;
6433 case ICE_SW_TUN_PPPOE_PAY:
6434 ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6436 case ICE_SW_TUN_PPPOE_IPV4:
6437 ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6438 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6439 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6441 case ICE_SW_TUN_PPPOE_IPV4_TCP:
6442 ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6444 case ICE_SW_TUN_PPPOE_IPV4_UDP:
6445 ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6447 case ICE_SW_TUN_PPPOE_IPV6:
6448 ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6449 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6450 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6452 case ICE_SW_TUN_PPPOE_IPV6_TCP:
6453 ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6455 case ICE_SW_TUN_PPPOE_IPV6_UDP:
6456 ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6458 case ICE_SW_TUN_PROFID_IPV6_ESP:
6459 case ICE_SW_TUN_IPV6_ESP:
6460 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6462 case ICE_SW_TUN_PROFID_IPV6_AH:
6463 case ICE_SW_TUN_IPV6_AH:
6464 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6466 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6467 case ICE_SW_TUN_IPV6_L2TPV3:
6468 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6470 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6471 case ICE_SW_TUN_IPV6_NAT_T:
6472 ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6474 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6475 ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6477 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6478 ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6480 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6481 ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6483 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6484 ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6486 case ICE_SW_TUN_IPV4_NAT_T:
6487 ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6489 case ICE_SW_TUN_IPV4_L2TPV3:
6490 ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6492 case ICE_SW_TUN_IPV4_ESP:
6493 ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6495 case ICE_SW_TUN_IPV4_AH:
6496 ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6498 case ICE_SW_IPV4_TCP:
6499 ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6501 case ICE_SW_IPV4_UDP:
6502 ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6504 case ICE_SW_IPV6_TCP:
6505 ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6507 case ICE_SW_IPV6_UDP:
6508 ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6510 case ICE_SW_TUN_AND_NON_TUN:
6512 prof_type = ICE_PROF_ALL;
 /* resolve the selected profile class into concrete profile bits */
6516 ice_get_sw_fv_bitmap(hw, prof_type, bm);
6520 * ice_is_prof_rule - determine if rule type is a profile rule
6521 * @type: the rule type
6523 * if the rule type is a profile rule, that means that there no field value
6524 * match required, in this case just a profile hit is required.
 *
 * Only the ICE_SW_TUN_PROFID_* tunnel types qualify as profile rules.
6526 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6529 case ICE_SW_TUN_PROFID_IPV6_ESP:
6530 case ICE_SW_TUN_PROFID_IPV6_AH:
6531 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6532 case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6533 case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6534 case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6535 case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6536 case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6546 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6547 * @hw: pointer to hardware structure
6548 * @lkups: lookup elements or match criteria for the advanced recipe, one
6549 * structure per protocol header
6550 * @lkups_cnt: number of protocols
6551 * @rinfo: other information regarding the rule e.g. priority and action info
6552 * @rid: return the recipe ID of the recipe created
 *
 * High-level flow: extract the valid lookup words, find compatible field
 * vectors, add metadata (tunnel-flag) words, group words into recipes,
 * resolve FV indexes, then either reuse an existing matching recipe
 * (ice_find_recp) or program a new one (ice_add_sw_recipe) and associate
 * it with every profile in the common FV list. Temporary rg_list/fv_list
 * entries and lkup_exts are freed on exit.
6554 static enum ice_status
6555 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6556 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6558 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6559 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6560 struct ice_prot_lkup_ext *lkup_exts;
6561 struct ice_recp_grp_entry *r_entry;
6562 struct ice_sw_fv_list_entry *fvit;
6563 struct ice_recp_grp_entry *r_tmp;
6564 struct ice_sw_fv_list_entry *tmp;
6565 enum ice_status status = ICE_SUCCESS;
6566 struct ice_sw_recipe *rm;
 /* a non-profile rule with no lookups has nothing to match on */
6569 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6570 return ICE_ERR_PARAM;
6572 lkup_exts = (struct ice_prot_lkup_ext *)
6573 ice_malloc(hw, sizeof(*lkup_exts))
6575 return ICE_ERR_NO_MEMORY;
6577 /* Determine the number of words to be matched and if it exceeds a
6578 * recipe's restrictions
6580 for (i = 0; i < lkups_cnt; i++) {
6583 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6584 status = ICE_ERR_CFG;
6585 goto err_free_lkup_exts;
6588 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6590 status = ICE_ERR_CFG;
6591 goto err_free_lkup_exts;
6595 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6597 status = ICE_ERR_NO_MEMORY;
6598 goto err_free_lkup_exts;
6601 /* Get field vectors that contain fields extracted from all the protocol
6602 * headers being programmed.
6604 INIT_LIST_HEAD(&rm->fv_list);
6605 INIT_LIST_HEAD(&rm->rg_list);
6607 /* Get bitmap of field vectors (profiles) that are compatible with the
6608 * rule request; only these will be searched in the subsequent call to
6611 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6613 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6617 /* Create any special protocol/offset pairs, such as looking at tunnel
6618 * bits by extracting metadata
6620 status = ice_add_special_words(rinfo, lkup_exts);
6622 goto err_free_lkup_exts;
6624 /* Group match words into recipes using preferred recipe grouping
6627 status = ice_create_recipe_group(hw, rm, lkup_exts);
6631 /* set the recipe priority if specified */
6632 rm->priority = (u8)rinfo->priority;
6634 /* Find offsets from the field vector. Pick the first one for all the
6637 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6641 /* An empty FV list means to use all the profiles returned in the
6644 if (LIST_EMPTY(&rm->fv_list)) {
6647 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
6648 struct ice_sw_fv_list_entry *fvl;
6650 fvl = (struct ice_sw_fv_list_entry *)
6651 ice_malloc(hw, sizeof(*fvl));
6655 fvl->profile_id = j;
6656 LIST_ADD(&fvl->list_entry, &rm->fv_list);
6660 /* get bitmap of all profiles the recipe will be associated with */
6661 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6662 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6664 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6665 ice_set_bit((u16)fvit->profile_id, profiles);
6668 /* Look for a recipe which matches our requested fv / mask list */
6669 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6670 if (*rid < ICE_MAX_NUM_RECIPES)
6671 /* Success if found a recipe that match the existing criteria */
6674 rm->tun_type = rinfo->tun_type;
6675 /* Recipe we need does not exist, add a recipe */
6676 status = ice_add_sw_recipe(hw, rm, profiles);
6680 /* Associate all the recipes created with all the profiles in the
6681 * common field vector.
6683 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6685 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
 /* read-modify-write the FW's recipe-to-profile association,
  * holding the change lock around the write
  */
6688 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6689 (u8 *)r_bitmap, NULL);
6693 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6694 ICE_MAX_NUM_RECIPES);
6695 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6699 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6702 ice_release_change_lock(hw);
6707 /* Update profile to recipe bitmap array */
6708 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6709 ICE_MAX_NUM_RECIPES);
6711 /* Update recipe to profile bitmap array */
6712 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
6713 ice_set_bit((u16)fvit->profile_id,
6714 recipe_to_profile[j]);
6717 *rid = rm->root_rid;
 /* remember the lookup words so future ice_find_recp() calls match */
6718 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6719 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6721 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6722 ice_recp_grp_entry, l_entry) {
6723 LIST_DEL(&r_entry->l_entry);
6724 ice_free(hw, r_entry);
6727 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6729 LIST_DEL(&fvit->list_entry);
6734 ice_free(hw, rm->root_buf);
6739 ice_free(hw, lkup_exts);
6745 * ice_find_dummy_packet - find dummy packet by tunnel type
6747 * @lkups: lookup elements or match criteria for the advanced recipe, one
6748 * structure per protocol header
6749 * @lkups_cnt: number of protocols
6750 * @tun_type: tunnel type from the match criteria
6751 * @pkt: dummy packet to fill according to filter match criteria
6752 * @pkt_len: packet length of dummy packet
6753 * @offsets: pointer to receive the pointer to the offsets for the packet
6756 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6757 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6759 const struct ice_dummy_pkt_offsets **offsets)
6761 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6765 for (i = 0; i < lkups_cnt; i++) {
6766 if (lkups[i].type == ICE_UDP_ILOS)
6768 else if (lkups[i].type == ICE_TCP_IL)
6770 else if (lkups[i].type == ICE_IPV6_OFOS)
6772 else if (lkups[i].type == ICE_VLAN_OFOS)
6774 else if (lkups[i].type == ICE_IPV4_OFOS &&
6775 lkups[i].h_u.ipv4_hdr.protocol ==
6776 ICE_IPV4_NVGRE_PROTO_ID &&
6777 lkups[i].m_u.ipv4_hdr.protocol ==
6780 else if (lkups[i].type == ICE_PPPOE &&
6781 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6782 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6783 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6786 else if (lkups[i].type == ICE_ETYPE_OL &&
6787 lkups[i].h_u.ethertype.ethtype_id ==
6788 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6789 lkups[i].m_u.ethertype.ethtype_id ==
6792 else if (lkups[i].type == ICE_IPV4_IL &&
6793 lkups[i].h_u.ipv4_hdr.protocol ==
6795 lkups[i].m_u.ipv4_hdr.protocol ==
6800 if (tun_type == ICE_SW_TUN_IPV4_ESP) {
6801 *pkt = dummy_ipv4_esp_pkt;
6802 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
6803 *offsets = dummy_ipv4_esp_packet_offsets;
6807 if (tun_type == ICE_SW_TUN_IPV6_ESP) {
6808 *pkt = dummy_ipv6_esp_pkt;
6809 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
6810 *offsets = dummy_ipv6_esp_packet_offsets;
6814 if (tun_type == ICE_SW_TUN_IPV4_AH) {
6815 *pkt = dummy_ipv4_ah_pkt;
6816 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
6817 *offsets = dummy_ipv4_ah_packet_offsets;
6821 if (tun_type == ICE_SW_TUN_IPV6_AH) {
6822 *pkt = dummy_ipv6_ah_pkt;
6823 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
6824 *offsets = dummy_ipv6_ah_packet_offsets;
6828 if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
6829 *pkt = dummy_ipv4_nat_pkt;
6830 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
6831 *offsets = dummy_ipv4_nat_packet_offsets;
6835 if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
6836 *pkt = dummy_ipv6_nat_pkt;
6837 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
6838 *offsets = dummy_ipv6_nat_packet_offsets;
6842 if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
6843 *pkt = dummy_ipv4_l2tpv3_pkt;
6844 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
6845 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
6849 if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
6850 *pkt = dummy_ipv6_l2tpv3_pkt;
6851 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
6852 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
6856 if (tun_type == ICE_SW_TUN_GTP) {
6857 *pkt = dummy_udp_gtp_packet;
6858 *pkt_len = sizeof(dummy_udp_gtp_packet);
6859 *offsets = dummy_udp_gtp_packet_offsets;
6863 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6864 *pkt = dummy_pppoe_ipv6_packet;
6865 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6866 *offsets = dummy_pppoe_packet_offsets;
6868 } else if (tun_type == ICE_SW_TUN_PPPOE ||
6869 tun_type == ICE_SW_TUN_PPPOE_PAY) {
6870 *pkt = dummy_pppoe_ipv4_packet;
6871 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6872 *offsets = dummy_pppoe_packet_offsets;
6876 if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
6877 *pkt = dummy_pppoe_ipv4_packet;
6878 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6879 *offsets = dummy_pppoe_packet_ipv4_offsets;
6883 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
6884 *pkt = dummy_pppoe_ipv4_tcp_packet;
6885 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
6886 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
6890 if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
6891 *pkt = dummy_pppoe_ipv4_udp_packet;
6892 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
6893 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
6897 if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
6898 *pkt = dummy_pppoe_ipv6_packet;
6899 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6900 *offsets = dummy_pppoe_packet_ipv6_offsets;
6904 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
6905 *pkt = dummy_pppoe_ipv6_tcp_packet;
6906 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
6907 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
6911 if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
6912 *pkt = dummy_pppoe_ipv6_udp_packet;
6913 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
6914 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
6918 if (tun_type == ICE_SW_IPV4_TCP) {
6919 *pkt = dummy_tcp_packet;
6920 *pkt_len = sizeof(dummy_tcp_packet);
6921 *offsets = dummy_tcp_packet_offsets;
6925 if (tun_type == ICE_SW_IPV4_UDP) {
6926 *pkt = dummy_udp_packet;
6927 *pkt_len = sizeof(dummy_udp_packet);
6928 *offsets = dummy_udp_packet_offsets;
6932 if (tun_type == ICE_SW_IPV6_TCP) {
6933 *pkt = dummy_tcp_ipv6_packet;
6934 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6935 *offsets = dummy_tcp_ipv6_packet_offsets;
6939 if (tun_type == ICE_SW_IPV6_UDP) {
6940 *pkt = dummy_udp_ipv6_packet;
6941 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6942 *offsets = dummy_udp_ipv6_packet_offsets;
6946 if (tun_type == ICE_ALL_TUNNELS) {
6947 *pkt = dummy_gre_udp_packet;
6948 *pkt_len = sizeof(dummy_gre_udp_packet);
6949 *offsets = dummy_gre_udp_packet_offsets;
6953 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6955 *pkt = dummy_gre_tcp_packet;
6956 *pkt_len = sizeof(dummy_gre_tcp_packet);
6957 *offsets = dummy_gre_tcp_packet_offsets;
6961 *pkt = dummy_gre_udp_packet;
6962 *pkt_len = sizeof(dummy_gre_udp_packet);
6963 *offsets = dummy_gre_udp_packet_offsets;
6967 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6968 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
6969 tun_type == ICE_SW_TUN_GENEVE_VLAN ||
6970 tun_type == ICE_SW_TUN_VXLAN_VLAN) {
6972 *pkt = dummy_udp_tun_tcp_packet;
6973 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6974 *offsets = dummy_udp_tun_tcp_packet_offsets;
6978 *pkt = dummy_udp_tun_udp_packet;
6979 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6980 *offsets = dummy_udp_tun_udp_packet_offsets;
6986 *pkt = dummy_vlan_udp_packet;
6987 *pkt_len = sizeof(dummy_vlan_udp_packet);
6988 *offsets = dummy_vlan_udp_packet_offsets;
6991 *pkt = dummy_udp_packet;
6992 *pkt_len = sizeof(dummy_udp_packet);
6993 *offsets = dummy_udp_packet_offsets;
6995 } else if (udp && ipv6) {
6997 *pkt = dummy_vlan_udp_ipv6_packet;
6998 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6999 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7002 *pkt = dummy_udp_ipv6_packet;
7003 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7004 *offsets = dummy_udp_ipv6_packet_offsets;
7006 } else if ((tcp && ipv6) || ipv6) {
7008 *pkt = dummy_vlan_tcp_ipv6_packet;
7009 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7010 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7013 *pkt = dummy_tcp_ipv6_packet;
7014 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7015 *offsets = dummy_tcp_ipv6_packet_offsets;
7020 *pkt = dummy_vlan_tcp_packet;
7021 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7022 *offsets = dummy_vlan_tcp_packet_offsets;
7024 *pkt = dummy_tcp_packet;
7025 *pkt_len = sizeof(dummy_tcp_packet);
7026 *offsets = dummy_tcp_packet_offsets;
7031 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7033 * @lkups: lookup elements or match criteria for the advanced recipe, one
7034 * structure per protocol header
7035 * @lkups_cnt: number of protocols
7036 * @s_rule: stores rule information from the match criteria
7037 * @dummy_pkt: dummy packet to fill according to filter match criteria
7038 * @pkt_len: packet length of dummy packet
7039 * @offsets: offset info for the dummy packet
/* Returns ICE_ERR_PARAM if a lookup type has no offset entry or an
 * unknown/odd-length header type is seen; otherwise fills the rule's
 * header buffer and records pkt_len.
 * NOTE(review): some lines (loop 'break', 'if (!found)' guard, switch
 * 'case' labels) were dropped by the extraction; code kept byte-identical.
 */
7041 static enum ice_status
7042 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7043 struct ice_aqc_sw_rules_elem *s_rule,
7044 const u8 *dummy_pkt, u16 pkt_len,
7045 const struct ice_dummy_pkt_offsets *offsets)
7050 /* Start with a packet with a pre-defined/dummy content. Then, fill
7051 * in the header values to be looked up or matched.
7053 pkt = s_rule->pdata.lkup_tx_rx.hdr;
7055 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7057 for (i = 0; i < lkups_cnt; i++) {
7058 enum ice_protocol_type type;
7059 u16 offset = 0, len = 0, j;
7062 /* find the start of this layer; it should be found since this
7063 * was already checked when search for the dummy packet
7065 type = lkups[i].type;
7066 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7067 if (type == offsets[j].type) {
7068 offset = offsets[j].offset;
7073 /* this should never happen in a correct calling sequence */
7075 return ICE_ERR_PARAM;
/* Map the protocol type to the size of its header struct; 'len'
 * bounds the word-wise masked copy below.
 */
7077 switch (lkups[i].type) {
7080 len = sizeof(struct ice_ether_hdr);
7083 len = sizeof(struct ice_ethtype_hdr);
7086 len = sizeof(struct ice_vlan_hdr);
7090 len = sizeof(struct ice_ipv4_hdr);
7094 len = sizeof(struct ice_ipv6_hdr);
7099 len = sizeof(struct ice_l4_hdr);
7102 len = sizeof(struct ice_sctp_hdr);
7105 len = sizeof(struct ice_nvgre);
7110 len = sizeof(struct ice_udp_tnl_hdr);
7114 len = sizeof(struct ice_udp_gtp_hdr);
7117 len = sizeof(struct ice_pppoe_hdr);
7120 len = sizeof(struct ice_esp_hdr);
7123 len = sizeof(struct ice_nat_t_hdr);
7126 len = sizeof(struct ice_ah_hdr);
7129 len = sizeof(struct ice_l2tpv3_sess_hdr);
7132 return ICE_ERR_PARAM;
7135 /* the length should be a word multiple */
7136 if (len % ICE_BYTES_PER_WORD)
7139 /* We have the offset to the header start, the length, the
7140 * caller's header values and mask. Use this information to
7141 * copy the data into the dummy packet appropriately based on
7142 * the mask. Note that we need to only write the bits as
7143 * indicated by the mask to make sure we don't improperly write
7144 * over any significant packet data.
/* For each 16-bit word with a non-zero mask: clear the masked bits
 * in the template, then OR in the caller's masked header value.
 */
7146 for (j = 0; j < len / sizeof(u16); j++)
7147 if (((u16 *)&lkups[i].m_u)[j])
7148 ((u16 *)(pkt + offset))[j] =
7149 (((u16 *)(pkt + offset))[j] &
7150 ~((u16 *)&lkups[i].m_u)[j]) |
7151 (((u16 *)&lkups[i].h_u)[j] &
7152 ((u16 *)&lkups[i].m_u)[j]);
7155 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7161 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7162 * @hw: pointer to the hardware structure
7163 * @tun_type: tunnel type
7164 * @pkt: dummy packet to fill in
7165 * @offsets: offset info for the dummy packet
/* Looks up the currently-open VXLAN/GENEVE tunnel UDP port and patches
 * it into the outer UDP destination port of the dummy packet.
 * NOTE(review): 'switch (tun_type)', error returns and 'break's were
 * elided by the extraction; code kept byte-identical.
 */
7167 static enum ice_status
7168 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7169 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* VXLAN-family tunnel types share the VXLAN port lookup */
7174 case ICE_SW_TUN_AND_NON_TUN:
7175 case ICE_SW_TUN_VXLAN_GPE:
7176 case ICE_SW_TUN_VXLAN:
7177 case ICE_SW_TUN_VXLAN_VLAN:
7178 case ICE_SW_TUN_UDP:
7179 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7183 case ICE_SW_TUN_GENEVE:
7184 case ICE_SW_TUN_GENEVE_VLAN:
7185 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7190 /* Nothing needs to be done for this tunnel type */
7194 /* Find the outer UDP protocol header and insert the port number */
7195 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7196 if (offsets[i].type == ICE_UDP_OF) {
7197 struct ice_l4_hdr *hdr;
7200 offset = offsets[i].offset;
/* Overlay the L4 header struct on the packet bytes and write the
 * open tunnel port (big-endian) as destination port.
 */
7201 hdr = (struct ice_l4_hdr *)&pkt[offset];
7202 hdr->dst_port = CPU_TO_BE16(open_port);
7212 * ice_find_adv_rule_entry - Search a rule entry
7213 * @hw: pointer to the hardware structure
7214 * @lkups: lookup elements or match criteria for the advanced recipe, one
7215 * structure per protocol header
7216 * @lkups_cnt: number of protocols
7217 * @recp_id: recipe ID for which we are finding the rule
7218 * @rinfo: other information regarding the rule e.g. priority and action info
7220 * Helper function to search for a given advance rule entry
7221 * Returns pointer to entry storing the rule if found
7223 static struct ice_adv_fltr_mgmt_list_entry *
7224 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7225 u16 lkups_cnt, u16 recp_id,
7226 struct ice_adv_rule_info *rinfo)
7228 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7229 struct ice_switch_info *sw = hw->switch_info;
/* Walk every stored filter under this recipe and compare lookups
 * element-by-element plus selected rule-info fields.
 */
7232 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7233 ice_adv_fltr_mgmt_list_entry, list_entry) {
7234 bool lkups_matched = true;
/* Different element counts can never match — skip early */
7236 if (lkups_cnt != list_itr->lkups_cnt)
7238 for (i = 0; i < list_itr->lkups_cnt; i++)
7239 if (memcmp(&list_itr->lkups[i], &lkups[i],
7241 lkups_matched = false;
/* Flag and tunnel type must also agree (further conditions and the
 * 'return list_itr' were elided by the extraction).
 */
7244 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7245 rinfo->tun_type == list_itr->rule_info.tun_type &&
7253 * ice_adv_add_update_vsi_list
7254 * @hw: pointer to the hardware structure
7255 * @m_entry: pointer to current adv filter management list entry
7256 * @cur_fltr: filter information from the book keeping entry
7257 * @new_fltr: filter information with the new VSI to be added
7259 * Call AQ command to add or update previously created VSI list with new VSI.
7261 * Helper function to do book keeping associated with adding filter information
7262 * The algorithm to do the booking keeping is described below :
7263 * When a VSI needs to subscribe to a given advanced filter
7264 * if only one VSI has been added till now
7265 * Allocate a new VSI list and add two VSIs
7266 * to this list using switch rule command
7267 * Update the previously created switch rule with the
7268 * newly created VSI list ID
7269 * if a VSI list was previously created
7270 * Add the new VSI to the previously created VSI list set
7271 * using the update switch rule command
7273 static enum ice_status
7274 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7275 struct ice_adv_fltr_mgmt_list_entry *m_entry,
7276 struct ice_adv_rule_info *cur_fltr,
7277 struct ice_adv_rule_info *new_fltr)
7279 enum ice_status status;
7280 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions never use VSI lists */
7282 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7283 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7284 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7285 return ICE_ERR_NOT_IMPL;
/* Mixing queue-forward with VSI-forward on the same rule is unsupported */
7287 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7288 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7289 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7290 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7291 return ICE_ERR_NOT_IMPL;
7293 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7294 /* Only one entry existed in the mapping and it was not already
7295 * a part of a VSI list. So, create a VSI list with the old and
7298 struct ice_fltr_info tmp_fltr;
7299 u16 vsi_handle_arr[2];
7301 /* A rule already exists with the new VSI being added */
7302 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7303 new_fltr->sw_act.fwd_id.hw_vsi_id)
7304 return ICE_ERR_ALREADY_EXISTS;
7306 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7307 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7308 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* Build a temporary filter describing the redirect of the existing
 * rule from single-VSI forward to the new VSI list.
 */
7314 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7315 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7316 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7317 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7318 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7319 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7321 /* Update the previous switch rule of "forward to VSI" to
7324 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the list in the book-keeping entry */
7328 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7329 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7330 m_entry->vsi_list_info =
7331 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7334 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7336 if (!m_entry->vsi_list_info)
7339 /* A rule already exists with the new VSI being added */
7340 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7343 /* Update the previously created VSI list set with
7344 * the new VSI ID passed in
7346 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7348 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7350 ice_aqc_opc_update_sw_rules,
7352 /* update VSI list mapping info with new VSI ID */
7354 ice_set_bit(vsi_handle,
7355 m_entry->vsi_list_info->vsi_map);
/* On success the subscriber count grows by one (the 'if (!status)'
 * guard around this was elided by the extraction — confirm upstream).
 */
7358 m_entry->vsi_count++;
7363 * ice_add_adv_rule - helper function to create an advanced switch rule
7364 * @hw: pointer to the hardware structure
7365 * @lkups: information on the words that needs to be looked up. All words
7366 * together makes one recipe
7367 * @lkups_cnt: num of entries in the lkups array
7368 * @rinfo: other information related to the rule that needs to be programmed
7369 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7370 * ignored is case of error.
7372 * This function can program only 1 rule at a time. The lkups is used to
7373 * describe the all the words that forms the "lookup" portion of the recipe.
7374 * These words can span multiple protocols. Callers to this function need to
7375 * pass in a list of protocol headers with lookup information along and mask
7376 * that determines which words are valid from the given protocol header.
7377 * rinfo describes other information related to this rule such as forwarding
7378 * IDs, priority of this rule, etc.
7381 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7382 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7383 struct ice_rule_query_data *added_entry)
7385 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7386 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7387 const struct ice_dummy_pkt_offsets *pkt_offsets;
7388 struct ice_aqc_sw_rules_elem *s_rule = NULL;
7389 struct LIST_HEAD_TYPE *rule_head;
7390 struct ice_switch_info *sw;
7391 enum ice_status status;
7392 const u8 *pkt = NULL;
7398 /* Initialize profile to result index bitmap */
7399 if (!hw->switch_info->prof_res_bm_init) {
7400 hw->switch_info->prof_res_bm_init = 1;
7401 ice_init_prof_result_bm(hw);
/* Profile rules may legitimately have zero lookups; others may not */
7404 prof_rule = ice_is_prof_rule(rinfo->tun_type);
7405 if (!prof_rule && !lkups_cnt)
7406 return ICE_ERR_PARAM;
7408 /* get # of words we need to match */
/* Count non-zero 16-bit mask words across all lookup elements */
7410 for (i = 0; i < lkups_cnt; i++) {
7413 ptr = (u16 *)&lkups[i].m_u;
7414 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* NOTE(review): the two checks below look redundant as shown, but
 * the extraction dropped the surrounding 'if (prof_rule) ... else'
 * arms (profile rules allow zero words) — do not "deduplicate".
 */
7420 if (word_cnt > ICE_MAX_CHAIN_WORDS)
7421 return ICE_ERR_PARAM;
7423 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7424 return ICE_ERR_PARAM;
7427 /* make sure that we can locate a dummy packet */
7428 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7431 status = ICE_ERR_PARAM;
7432 goto err_ice_add_adv_rule;
/* Only these four forwarding actions are supported */
7435 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7436 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7437 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7438 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7441 vsi_handle = rinfo->sw_act.vsi_handle;
7442 if (!ice_is_vsi_valid(hw, vsi_handle))
7443 return ICE_ERR_PARAM;
7445 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7446 rinfo->sw_act.fwd_id.hw_vsi_id =
7447 ice_get_hw_vsi_num(hw, vsi_handle);
7448 if (rinfo->sw_act.flag & ICE_FLTR_TX)
7449 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
7451 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, just subscribe this VSI to it */
7454 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7456 /* we have to add VSI to VSI_LIST and increment vsi_count.
7457 * Also Update VSI list so that we can change forwarding rule
7458 * if the rule already exists, we will check if it exists with
7459 * same vsi_id, if not then add it to the VSI list if it already
7460 * exists if not then create a VSI list and add the existing VSI
7461 * ID and the new VSI ID to the list
7462 * We will add that VSI to the list
7464 status = ice_adv_add_update_vsi_list(hw, m_entry,
7465 &m_entry->rule_info,
7468 added_entry->rid = rid;
7469 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
7470 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: allocate a switch-rule buffer large enough for the header */
7474 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
7475 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
7477 return ICE_ERR_NO_MEMORY;
7478 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the forwarding action into the single-action word */
7479 switch (rinfo->sw_act.fltr_act) {
7480 case ICE_FWD_TO_VSI:
7481 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
7482 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
7483 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
7486 act |= ICE_SINGLE_ACT_TO_Q;
7487 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7488 ICE_SINGLE_ACT_Q_INDEX_M;
7490 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 of the group size */
7491 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
7492 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
7493 act |= ICE_SINGLE_ACT_TO_Q;
7494 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
7495 ICE_SINGLE_ACT_Q_INDEX_M;
7496 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
7497 ICE_SINGLE_ACT_Q_REGION_M;
7499 case ICE_DROP_PACKET:
7500 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
7501 ICE_SINGLE_ACT_VALID_BIT;
7504 status = ICE_ERR_CFG;
7505 goto err_ice_add_adv_rule;
7508 /* set the rule LOOKUP type based on caller specified 'RX'
7509 * instead of hardcoding it to be either LOOKUP_TX/RX
7511 * for 'RX' set the source to be the port number
7512 * for 'TX' set the source to be the source HW VSI number (determined
7516 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
7517 s_rule->pdata.lkup_tx_rx.src =
7518 CPU_TO_LE16(hw->port_info->lport);
7520 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
7521 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
7524 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
7525 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Copy the dummy packet into the rule and apply the caller's masks */
7527 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
7528 pkt_len, pkt_offsets);
7530 goto err_ice_add_adv_rule;
/* Tunneled rules also need the open tunnel UDP port patched in */
7532 if (rinfo->tun_type != ICE_NON_TUN &&
7533 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
7534 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
7535 s_rule->pdata.lkup_tx_rx.hdr,
7538 goto err_ice_add_adv_rule;
/* Program the rule in firmware via the admin queue */
7541 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7542 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
7545 goto err_ice_add_adv_rule;
7546 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
7547 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
7549 status = ICE_ERR_NO_MEMORY;
7550 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups for later match/removal */
7553 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
7554 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
7555 ICE_NONDMA_TO_NONDMA);
7556 if (!adv_fltr->lkups && !prof_rule) {
7557 status = ICE_ERR_NO_MEMORY;
7558 goto err_ice_add_adv_rule;
7561 adv_fltr->lkups_cnt = lkups_cnt;
7562 adv_fltr->rule_info = *rinfo;
7563 adv_fltr->rule_info.fltr_rule_id =
7564 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
7565 sw = hw->switch_info;
7566 sw->recp_list[rid].adv_rule = true;
7567 rule_head = &sw->recp_list[rid].filt_rules;
7569 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
7570 adv_fltr->vsi_count = 1;
7572 /* Add rule entry to book keeping list */
7573 LIST_ADD(&adv_fltr->list_entry, rule_head);
7575 added_entry->rid = rid;
7576 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
7577 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Cleanup path: on failure free any book-keeping entry; the AQ buffer
 * is always released.
 */
7579 err_ice_add_adv_rule:
7580 if (status && adv_fltr) {
7581 ice_free(hw, adv_fltr->lkups);
7582 ice_free(hw, adv_fltr);
7585 ice_free(hw, s_rule);
7591 * ice_adv_rem_update_vsi_list
7592 * @hw: pointer to the hardware structure
7593 * @vsi_handle: VSI handle of the VSI to remove
7594 * @fm_list: filter management entry for which the VSI list management needs to
/* Unsubscribes one VSI from a shared advanced-filter VSI list. When only
 * one subscriber remains, the rule is converted back to a plain
 * forward-to-VSI and the now-unneeded VSI list is removed.
 */
7597 static enum ice_status
7598 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7599 struct ice_adv_fltr_mgmt_list_entry *fm_list)
7601 struct ice_vsi_list_map_info *vsi_list_info;
7602 enum ice_sw_lkup_type lkup_type;
7603 enum ice_status status;
/* Only meaningful for rules currently forwarding to a VSI list */
7606 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7607 fm_list->vsi_count == 0)
7608 return ICE_ERR_PARAM;
7610 /* A rule with the VSI being removed does not exist */
7611 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7612 return ICE_ERR_DOES_NOT_EXIST;
7614 lkup_type = ICE_SW_LKUP_LAST;
7615 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* 'true' requests removal of the VSI from the list rule */
7616 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7617 ice_aqc_opc_update_sw_rules,
7622 fm_list->vsi_count--;
7623 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7624 vsi_list_info = fm_list->vsi_list_info;
7625 if (fm_list->vsi_count == 1) {
7626 struct ice_fltr_info tmp_fltr;
/* Find the lone remaining subscriber in the bitmap */
7629 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7631 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7632 return ICE_ERR_OUT_OF_RANGE;
7634 /* Make sure VSI list is empty before removing it below */
7635 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7637 ice_aqc_opc_update_sw_rules,
/* Rewrite the rule to forward directly to the remaining VSI */
7642 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7643 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7644 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7645 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7646 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7647 tmp_fltr.fwd_id.hw_vsi_id =
7648 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7649 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7650 ice_get_hw_vsi_num(hw, rem_vsi_handle);
7651 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7653 /* Update the previous switch rule of "MAC forward to VSI" to
7654 * "MAC fwd to VSI list"
7656 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7658 ice_debug(hw, ICE_DBG_SW,
7659 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7660 tmp_fltr.fwd_id.hw_vsi_id, status);
7663 fm_list->vsi_list_info->ref_cnt--;
7665 /* Remove the VSI list since it is no longer used */
7666 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7668 ice_debug(hw, ICE_DBG_SW,
7669 "Failed to remove VSI list %d, error %d\n",
7670 vsi_list_id, status);
/* Drop the map entry and clear the stale pointer in the filter */
7674 LIST_DEL(&vsi_list_info->list_entry);
7675 ice_free(hw, vsi_list_info);
7676 fm_list->vsi_list_info = NULL;
7683 * ice_rem_adv_rule - removes existing advanced switch rule
7684 * @hw: pointer to the hardware structure
7685 * @lkups: information on the words that needs to be looked up. All words
7686 * together makes one recipe
7687 * @lkups_cnt: num of entries in the lkups array
7688 * @rinfo: Its the pointer to the rule information for the rule
7690 * This function can be used to remove 1 rule at a time. The lkups is
7691 * used to describe all the words that forms the "lookup" portion of the
7692 * rule. These words can span multiple protocols. Callers to this function
7693 * need to pass in a list of protocol headers with lookup information along
7694 * and mask that determines which words are valid from the given protocol
7695 * header. rinfo describes other information related to this rule such as
7696 * forwarding IDs, priority of this rule, etc.
7699 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7700 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7702 struct ice_adv_fltr_mgmt_list_entry *list_elem;
7703 struct ice_prot_lkup_ext lkup_exts;
7704 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7705 enum ice_status status = ICE_SUCCESS;
7706 bool remove_rule = false;
7707 u16 i, rid, vsi_handle;
/* Rebuild the word-extraction description to locate the recipe */
7709 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7710 for (i = 0; i < lkups_cnt; i++) {
7713 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7716 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7721 /* Create any special protocol/offset pairs, such as looking at tunnel
7722 * bits by extracting metadata
7724 status = ice_add_special_words(rinfo, &lkup_exts);
7728 rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
7729 /* If did not find a recipe that match the existing criteria */
7730 if (rid == ICE_MAX_NUM_RECIPES)
7731 return ICE_ERR_PARAM;
7733 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
7734 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
7735 /* the rule is already removed */
/* Decide under the lock whether only this VSI unsubscribes or the
 * whole rule goes away (branch bodies partially elided by extraction).
 */
7738 ice_acquire_lock(rule_lock);
7739 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
7741 } else if (list_elem->vsi_count > 1) {
7742 remove_rule = false;
7743 vsi_handle = rinfo->sw_act.vsi_handle;
7744 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7746 vsi_handle = rinfo->sw_act.vsi_handle;
7747 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
7749 ice_release_lock(rule_lock);
7752 if (list_elem->vsi_count == 0)
7755 ice_release_lock(rule_lock);
/* Full removal: issue a remove-switch-rule AQ command, then purge the
 * book-keeping entry under the lock.
 */
7757 struct ice_aqc_sw_rules_elem *s_rule;
7760 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
7762 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
7765 return ICE_ERR_NO_MEMORY;
7766 s_rule->pdata.lkup_tx_rx.act = 0;
7767 s_rule->pdata.lkup_tx_rx.index =
7768 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
7769 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
7770 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
7772 ice_aqc_opc_remove_sw_rules, NULL);
/* DOES_NOT_EXIST is acceptable: the HW rule is already gone */
7773 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
7774 struct ice_switch_info *sw = hw->switch_info;
7776 ice_acquire_lock(rule_lock);
7777 LIST_DEL(&list_elem->list_entry);
7778 ice_free(hw, list_elem->lkups);
7779 ice_free(hw, list_elem);
7780 ice_release_lock(rule_lock);
7781 if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
7782 sw->recp_list[rid].adv_rule = false;
7784 ice_free(hw, s_rule);
7790 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
7791 * @hw: pointer to the hardware structure
7792 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
7794 * This function is used to remove 1 rule at a time. The removal is based on
7795 * the remove_entry parameter. This function will remove rule for a given
7796 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
7799 ice_rem_adv_rule_by_id(struct ice_hw *hw,
7800 struct ice_rule_query_data *remove_entry)
7802 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7803 struct LIST_HEAD_TYPE *list_head;
7804 struct ice_adv_rule_info rinfo;
7805 struct ice_switch_info *sw;
7807 sw = hw->switch_info;
/* The recipe must exist before its filter list can be searched */
7808 if (!sw->recp_list[remove_entry->rid].recp_created)
7809 return ICE_ERR_PARAM;
7810 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
7811 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
7813 if (list_itr->rule_info.fltr_rule_id ==
7814 remove_entry->rule_id) {
/* Copy the stored rule info, retarget it at the caller's VSI,
 * and delegate the actual removal to ice_rem_adv_rule().
 */
7815 rinfo = list_itr->rule_info;
7816 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
7817 return ice_rem_adv_rule(hw, list_itr->lkups,
7818 list_itr->lkups_cnt, &rinfo);
7821 /* either list is empty or unable to find rule */
7822 return ICE_ERR_DOES_NOT_EXIST;
7826 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
7828 * @hw: pointer to the hardware structure
7829 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
7831 * This function is used to remove all the rules for a given VSI and as soon
7832 * as removing a rule fails, it will return immediately with the error code,
7833 * else it will return ICE_SUCCESS
7835 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
7837 struct ice_adv_fltr_mgmt_list_entry *list_itr;
7838 struct ice_vsi_list_map_info *map_info;
7839 struct LIST_HEAD_TYPE *list_head;
7840 struct ice_adv_rule_info rinfo;
7841 struct ice_switch_info *sw;
7842 enum ice_status status;
7843 u16 vsi_list_id = 0;
7846 sw = hw->switch_info;
/* Scan every created recipe that holds advanced rules */
7847 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
7848 if (!sw->recp_list[rid].recp_created)
7850 if (!sw->recp_list[rid].adv_rule)
7852 list_head = &sw->recp_list[rid].filt_rules;
7854 LIST_FOR_EACH_ENTRY(list_itr, list_head,
7855 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Only remove rules whose VSI list contains this VSI (the
 * skip-if-not-found check was elided by the extraction).
 */
7856 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7861 rinfo = list_itr->rule_info;
7862 rinfo.sw_act.vsi_handle = vsi_handle;
7863 status = ice_rem_adv_rule(hw, list_itr->lkups,
7864 list_itr->lkups_cnt, &rinfo);
7874 * ice_replay_fltr - Replay all the filters stored by a specific list head
7875 * @hw: pointer to the hardware structure
7876 * @list_head: list for which filters needs to be replayed
7877 * @recp_id: Recipe ID for which rules need to be replayed
7879 static enum ice_status
7880 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7882 struct ice_fltr_mgmt_list_entry *itr;
7883 enum ice_status status = ICE_SUCCESS;
7884 struct ice_sw_recipe *recp_list;
7885 u8 lport = hw->port_info->lport;
7886 struct LIST_HEAD_TYPE l_head;
/* Nothing to replay for an empty list */
7888 if (LIST_EMPTY(list_head))
7891 recp_list = &hw->switch_info->recp_list[recp_id];
7892 /* Move entries from the given list_head to a temporary l_head so that
7893 * they can be replayed. Otherwise when trying to re-add the same
7894 * filter, the function will return already exists
7896 LIST_REPLACE_INIT(list_head, &l_head);
7898 /* Mark the given list_head empty by reinitializing it so filters
7899 * could be added again by *handler
7901 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7903 struct ice_fltr_list_entry f_entry;
7906 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI non-VLAN filters replay directly as one rule */
7907 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7908 status = ice_add_rule_internal(hw, recp_list, lport,
7910 if (status != ICE_SUCCESS)
7915 /* Add a filter per VSI separately */
7916 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
7918 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Clear the bit so the add path can re-set it during replay */
7921 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7922 f_entry.fltr_info.vsi_handle = vsi_handle;
7923 f_entry.fltr_info.fwd_id.hw_vsi_id =
7924 ice_get_hw_vsi_num(hw, vsi_handle);
7925 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN rules use the dedicated VLAN add path */
7926 if (recp_id == ICE_SW_LKUP_VLAN)
7927 status = ice_add_vlan_internal(hw, recp_list,
7930 status = ice_add_rule_internal(hw, recp_list,
7933 if (status != ICE_SUCCESS)
7938 /* Clear the filter management list */
7939 ice_rem_sw_rule_info(hw, &l_head);
7944 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7945 * @hw: pointer to the hardware structure
7947 * NOTE: This function does not clean up partially added filters on error.
7948 * It is up to caller of the function to issue a reset or fail early.
7950 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7952 struct ice_switch_info *sw = hw->switch_info;
7953 enum ice_status status = ICE_SUCCESS;
/* Replay each recipe's filter list in turn; stop at the first failure */
7956 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7957 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7959 status = ice_replay_fltr(hw, i, head);
7960 if (status != ICE_SUCCESS)
7967 * ice_replay_vsi_fltr - Replay filters for requested VSI
7968 * @hw: pointer to the hardware structure
7969 * @pi: pointer to port information structure
7970 * @sw: pointer to switch info struct for which function replays filters
7971 * @vsi_handle: driver VSI handle
7972 * @recp_id: Recipe ID for which rules need to be replayed
7973 * @list_head: list for which filters need to be replayed
7975 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7976 * It is required to pass valid VSI handle.
7978 static enum ice_status
7979 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
7980 struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
7981 struct LIST_HEAD_TYPE *list_head)
7983 struct ice_fltr_mgmt_list_entry *itr;
7984 enum ice_status status = ICE_SUCCESS;
7985 struct ice_sw_recipe *recp_list;
/* Nothing to replay for an empty bookkeeping list. */
7988 if (LIST_EMPTY(list_head))
7990 recp_list = &sw->recp_list[recp_id];
7991 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7993 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7995 struct ice_fltr_list_entry f_entry;
/* Work on a copy so the bookkept entry itself is not modified. */
7997 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN rule owned by this VSI: re-add it directly. */
7998 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7999 itr->fltr_info.vsi_handle == vsi_handle) {
8000 /* update the src in case it is VSI num */
8001 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8002 f_entry.fltr_info.src = hw_vsi_id;
8003 status = ice_add_rule_internal(hw, recp_list,
8006 if (status != ICE_SUCCESS)
/* Shared (VSI-list) rule: skip entries whose VSI map does not
 * include this VSI.
 */
8010 if (!itr->vsi_list_info ||
8011 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8013 /* Clearing it so that the logic can add it back */
8014 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8015 f_entry.fltr_info.vsi_handle = vsi_handle;
8016 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8017 /* update the src in case it is VSI num */
8018 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8019 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules have dedicated add logic; everything else goes through
 * the generic internal add path.
 */
8020 if (recp_id == ICE_SW_LKUP_VLAN)
8021 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8023 status = ice_add_rule_internal(hw, recp_list,
8026 if (status != ICE_SUCCESS)
8034 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8035 * @hw: pointer to the hardware structure
8036 * @vsi_handle: driver VSI handle
8037 * @list_head: list for which filters need to be replayed
8039 * Replay the advanced rule for the given VSI.
8041 static enum ice_status
8042 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8043 struct LIST_HEAD_TYPE *list_head)
/* Scratch output for ice_add_adv_rule; the queried rule data is not
 * used further here.
 */
8045 struct ice_rule_query_data added_entry = { 0 };
8046 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8047 enum ice_status status = ICE_SUCCESS;
/* No advanced rules bookkept for this recipe — nothing to do. */
8049 if (LIST_EMPTY(list_head))
8051 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8053 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8054 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules whose switch action targets this VSI. */
8056 if (vsi_handle != rinfo->sw_act.vsi_handle)
8058 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8067 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8068 * @hw: pointer to the hardware structure
8069 * @pi: pointer to port information structure
8070 * @vsi_handle: driver VSI handle
8072 * Replays filters for requested VSI via vsi_handle.
8075 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8078 struct ice_switch_info *sw = hw->switch_info;
8079 enum ice_status status;
8082 /* Update the recipes that were created */
8083 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8084 struct LIST_HEAD_TYPE *head;
/* Replay from the dedicated filt_replay_rules list, dispatching to
 * the basic or advanced replay path based on the recipe type.
 */
8086 head = &sw->recp_list[i].filt_replay_rules;
8087 if (!sw->recp_list[i].adv_rule)
8088 status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8091 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop at the first recipe that fails to replay. */
8092 if (status != ICE_SUCCESS)
8100 * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
8101 * @hw: pointer to the HW struct
8102 * @sw: pointer to switch info struct for which function removes filters
8104 * Deletes the filter replay rules for given switch
8106 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8113 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8114 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8115 struct LIST_HEAD_TYPE *l_head;
8117 l_head = &sw->recp_list[i].filt_replay_rules;
/* Basic and advanced rules carry different bookkeeping, so each
 * recipe type has its own removal helper.
 */
8118 if (!sw->recp_list[i].adv_rule)
8119 ice_rem_sw_rule_info(hw, l_head);
8121 ice_rem_adv_rule_info(hw, l_head);
8127 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8128 * @hw: pointer to the HW struct
8130 * Deletes the filter replay rules.
8132 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
/* Thin convenience wrapper over the per-switch-info helper, using the
 * HW's own switch_info.
 */
8134 ice_rm_sw_replay_rule_info(hw, hw->switch_info);