1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy Ethernet header defined below */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN ID is a 12-bit field, so 0xFFF is the largest valid VLAN ID */
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on hardcoded values:
21 * byte 0 = 0x2: to identify it as a locally administered DA MAC
22 * byte 6 = 0x2: to identify it as a locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of a VLAN filter the first two bytes define the Ether type (0x8100)
25 * and the remaining two bytes are a placeholder for programming a given VLAN ID
26 * In case of an Ether type filter it is treated as a header without VLAN tag
27 * and bytes 12 and 13 are used to program a given Ether type instead
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size of a RX/TX lookup rule element that carries the dummy Ethernet
 * header above as its packet data (the trailing "- 1" accounts for the
 * one-byte flexible data member already counted inside the struct).
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of a RX/TX lookup rule element with no packet data attached */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding n actions */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding n VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* One (protocol, byte-offset) pair describing where a protocol header
 * starts inside a dummy training packet; tables of these terminate with
 * an ICE_PROTOCOL_LAST entry.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
60 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
63 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP */
71 static const u8 dummy_gre_tcp_packet[] = {
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
76 0x08, 0x00, /* ICE_ETYPE_OL 12 */
78 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
84 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
85 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
88 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00,
92 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
93 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN flag set */
102 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
105 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
107 { ICE_ETYPE_OL, 12 },
108 { ICE_IPV4_OFOS, 14 },
112 { ICE_UDP_ILOS, 76 },
113 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP */
116 static const u8 dummy_gre_udp_packet[] = {
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
121 0x08, 0x00, /* ICE_ETYPE_OL 12 */
123 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
129 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
130 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
137 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
144 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner MAC/IPv4/TCP */
147 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
154 { ICE_VXLAN_GPE, 42 },
158 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC/IPv4/TCP */
161 static const u8 dummy_udp_tun_tcp_packet[] = {
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x08, 0x00, /* ICE_ETYPE_OL 12 */
168 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 — dst port 0x12b5 = 4789 (VXLAN) */
175 0x00, 0x46, 0x00, 0x00,
177 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
181 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
185 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
186 0x00, 0x01, 0x00, 0x00,
187 0x40, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
188 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x50, 0x02, 0x20, 0x00, /* data offset 5, SYN flag set */
195 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner MAC/IPv4/UDP */
198 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
200 { ICE_ETYPE_OL, 12 },
201 { ICE_IPV4_OFOS, 14 },
205 { ICE_VXLAN_GPE, 42 },
208 { ICE_UDP_ILOS, 84 },
209 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC/IPv4/UDP */
212 static const u8 dummy_udp_tun_udp_packet[] = {
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x08, 0x00, /* ICE_ETYPE_OL 12 */
219 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 — dst port 0x12b5 = 4789 (VXLAN) */
226 0x00, 0x3a, 0x00, 0x00,
228 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
236 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
237 0x00, 0x01, 0x00, 0x00,
238 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
243 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
246 /* offset info for MAC + IPv4 + UDP dummy packet */
247 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
249 { ICE_ETYPE_OL, 12 },
250 { ICE_IPV4_OFOS, 14 },
251 { ICE_UDP_ILOS, 34 },
252 { ICE_PROTOCOL_LAST, 0 },
255 /* Dummy packet for MAC + IPv4 + UDP */
256 static const u8 dummy_udp_packet[] = {
257 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00,
261 0x08, 0x00, /* ICE_ETYPE_OL 12 */
263 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
264 0x00, 0x01, 0x00, 0x00,
265 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
270 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
272 0x00, 0x00, /* 2 bytes for 4 byte alignment */
275 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
276 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
278 { ICE_ETYPE_OL, 12 },
279 { ICE_VLAN_OFOS, 14 },
280 { ICE_IPV4_OFOS, 18 },
281 { ICE_UDP_ILOS, 38 },
282 { ICE_PROTOCOL_LAST, 0 },
285 /* C-tag (802.1Q), IPv4:UDP dummy packet */
286 static const u8 dummy_vlan_udp_packet[] = {
287 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
288 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00,
291 0x81, 0x00, /* ICE_ETYPE_OL 12 — 0x8100 = C-tag TPID */
293 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 — TCI placeholder + 0x0800 */
295 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
296 0x00, 0x01, 0x00, 0x00,
297 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
298 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
302 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
304 0x00, 0x00, /* 2 bytes for 4 byte alignment */
307 /* offset info for MAC + IPv4 + TCP dummy packet */
308 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
310 { ICE_ETYPE_OL, 12 },
311 { ICE_IPV4_OFOS, 14 },
313 { ICE_PROTOCOL_LAST, 0 },
316 /* Dummy packet for MAC + IPv4 + TCP */
317 static const u8 dummy_tcp_packet[] = {
318 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
319 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00,
322 0x08, 0x00, /* ICE_ETYPE_OL 12 */
324 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
325 0x00, 0x01, 0x00, 0x00,
326 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
334 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, /* 2 bytes for 4 byte alignment */
339 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
340 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_VLAN_OFOS, 14 },
344 { ICE_IPV4_OFOS, 18 },
346 { ICE_PROTOCOL_LAST, 0 },
349 /* C-tag (802.1Q), IPv4:TCP dummy packet */
350 static const u8 dummy_vlan_tcp_packet[] = {
351 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
352 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x81, 0x00, /* ICE_ETYPE_OL 12 — 0x8100 = C-tag TPID */
357 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 — TCI placeholder + 0x0800 */
359 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
360 0x00, 0x01, 0x00, 0x00,
361 0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
368 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
369 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
374 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
376 { ICE_ETYPE_OL, 12 },
377 { ICE_IPV6_OFOS, 14 },
379 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP */
382 static const u8 dummy_tcp_ipv6_packet[] = {
383 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x86, 0xDD, /* ICE_ETYPE_OL 12 — 0x86DD = IPv6 */
389 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
390 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
391 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, /* 2 bytes for 4 byte alignment */
409 /* C-tag (802.1Q): IPv6 + TCP */
410 static const struct ice_dummy_pkt_offsets
411 dummy_vlan_tcp_ipv6_packet_offsets[] = {
413 { ICE_ETYPE_OL, 12 },
414 { ICE_VLAN_OFOS, 14 },
415 { ICE_IPV6_OFOS, 18 },
417 { ICE_PROTOCOL_LAST, 0 },
420 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
421 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x81, 0x00, /* ICE_ETYPE_OL 12 — 0x8100 = C-tag TPID */
428 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 — TCI placeholder + 0x86DD */
430 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
431 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x50, 0x00, 0x00, 0x00, /* data offset 5, no flags */
445 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
451 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
453 { ICE_ETYPE_OL, 12 },
454 { ICE_IPV6_OFOS, 14 },
455 { ICE_UDP_ILOS, 54 },
456 { ICE_PROTOCOL_LAST, 0 },
459 /* IPv6 + UDP dummy packet */
460 static const u8 dummy_udp_ipv6_packet[] = {
461 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
462 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00,
465 0x86, 0xDD, /* ICE_ETYPE_OL 12 — 0x86DD = IPv6 */
467 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
468 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
469 0x00, 0x00, 0x00, 0x00,
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
479 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
481 0x00, 0x00, /* 2 bytes for 4 byte alignment */
484 /* C-tag (802.1Q): IPv6 + UDP */
485 static const struct ice_dummy_pkt_offsets
486 dummy_vlan_udp_ipv6_packet_offsets[] = {
488 { ICE_ETYPE_OL, 12 },
489 { ICE_VLAN_OFOS, 14 },
490 { ICE_IPV6_OFOS, 18 },
491 { ICE_UDP_ILOS, 58 },
492 { ICE_PROTOCOL_LAST, 0 },
495 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
496 static const u8 dummy_vlan_udp_ipv6_packet[] = {
497 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x81, 0x00, /* ICE_ETYPE_OL 12 — 0x8100 = C-tag TPID */
503 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 — TCI placeholder + 0x86DD */
505 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
506 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
517 0x00, 0x08, 0x00, 0x00, /* UDP length 8 = header only */
519 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
522 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
524 { ICE_IPV4_OFOS, 14 },
527 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP + GTP-U with PDU session container */
530 static const u8 dummy_udp_gtp_packet[] = {
531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
532 0x00, 0x00, 0x00, 0x00,
533 0x00, 0x00, 0x00, 0x00,
536 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
537 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 — dst port 0x0868 = 2152 (GTP-U) */
543 0x00, 0x1c, 0x00, 0x00,
545 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
546 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x85, /* next ext hdr 0x85 — presumably PDU session container; confirm against 3GPP spec */
549 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
550 0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE session + IPv4 dummy packet */
553 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
555 { ICE_ETYPE_OL, 12 },
556 { ICE_VLAN_OFOS, 14},
558 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + VLAN + PPPoE session + IPv4 */
561 static const u8 dummy_pppoe_packet[] = {
562 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x00, 0x00, 0x00,
566 0x81, 0x00, /* ICE_ETYPE_OL 12 — 0x8100 = C-tag TPID */
568 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 — 0x8864 = PPPoE session */
570 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
573 0x00, 0x21, /* PPP Link Layer 24 — 0x0021 = IPv4 */
575 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
584 /* Recipe-to-profile association bitmap: indexed by recipe ID, each bitmap
 * records which profiles the recipe is mapped to. Cached from FW by
 * ice_get_recp_to_prof_map() and consumed by ice_get_recp_frm_fw(). */
585 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
586 ICE_MAX_NUM_PROFILES);
588 /* Profile-to-recipe association bitmap (inverse of the map above) */
589 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
590 ICE_MAX_NUM_RECIPES);
/* forward declaration — defined later in this file */
592 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
595 * ice_collect_result_idx - copy result index values
596 * @buf: buffer that contains the result index
597 * @recp: the recipe struct to copy data into
 *
 * If the FW recipe entry has a valid result index (RESULT_EN flag set),
 * record that index in the recipe's res_idxs bitmap.
599 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
600 struct ice_sw_recipe *recp)
/* result_indx is only meaningful when ICE_AQ_RECIPE_RESULT_EN is set;
 * the remaining bits hold the index itself.
 */
602 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
603 ice_set_bit(buf->content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
608 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
609 * @hw: pointer to hardware structure
610 * @recps: struct that we need to populate
611 * @rid: recipe ID that we are populating
612 * @refresh_required: true if we should get recipe to profile mapping from FW
614 * This function is used to populate all the necessary entries into our
615 * bookkeeping so that we have a current list of all the recipes that are
616 * programmed in the firmware.
618 static enum ice_status
619 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
620 bool *refresh_required)
622 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
623 struct ice_aqc_recipe_data_elem *tmp;
624 u16 num_recps = ICE_MAX_NUM_RECIPES;
625 struct ice_prot_lkup_ext *lkup_exts;
626 u16 i, sub_recps, fv_word_idx = 0;
627 enum ice_status status;
629 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
631 /* we need a buffer big enough to accommodate all the recipes */
632 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
633 ICE_MAX_NUM_RECIPES, sizeof(*tmp))
635 return ICE_ERR_NO_MEMORY;
/* ask FW for the chain of sub-recipes that make up recipe 'rid' */
637 tmp[0].recipe_indx = rid;
638 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
639 /* non-zero status meaning recipe doesn't exist */
643 /* Get recipe to profile map so that we can get the fv from lkups that
644 * we read for a recipe from FW. Since we want to minimize the number of
645 * times we make this FW call, just make one call and cache the copy
646 * until a new recipe is added. This operation is only required the
647 * first time to get the changes from FW. Then to search existing
648 * entries we don't need to update the cache again until another recipe
 * gets added.
651 if (*refresh_required) {
652 ice_get_recp_to_prof_map(hw);
653 *refresh_required = false;
656 /* Start populating all the entries for recps[rid] based on lkups from
657 * firmware. Note that we are only creating the root recipe in our
 * database.
660 lkup_exts = &recps[rid].lkup_exts;
662 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
663 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
664 struct ice_recp_grp_entry *rg_entry;
665 u8 prof, idx, prot = 0;
669 rg_entry = (struct ice_recp_grp_entry *)
670 ice_malloc(hw, sizeof(*rg_entry));
672 status = ICE_ERR_NO_MEMORY;
676 idx = root_bufs.recipe_indx;
677 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
679 /* Mark all result indices in this chain */
680 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
681 ice_set_bit(root_bufs.content.result_indx &
682 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
684 /* get the first profile that is associated with rid */
685 prof = ice_find_first_bit(recipe_to_profile[idx],
686 ICE_MAX_NUM_PROFILES);
/* NOTE(review): lkup_indx[0]/mask[0] are skipped here (i + 1 below);
 * presumably entry 0 is reserved by FW — confirm against AQ spec.
 */
687 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
688 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
690 rg_entry->fv_idx[i] = lkup_indx;
691 rg_entry->fv_mask[i] =
692 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
694 /* If the recipe is a chained recipe then all its
695 * child recipe's result will have a result index.
696 * To fill fv_words we should not use those result
697 * index, we only need the protocol ids and offsets.
698 * We will skip all the fv_idx which stores result
699 * index in them. We also need to skip any fv_idx which
700 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
701 * valid offset value.
703 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
704 rg_entry->fv_idx[i]) ||
705 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
706 rg_entry->fv_idx[i] == 0)
709 ice_find_prot_off(hw, ICE_BLK_SW, prof,
710 rg_entry->fv_idx[i], &prot, &off);
711 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
712 lkup_exts->fv_words[fv_word_idx].off = off;
715 /* populate rg_list with the data from the child entry of this
 * recipe
718 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
720 /* Propagate some data to the recipe database */
721 recps[idx].is_root = is_root;
722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
723 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
724 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
725 recps[idx].chain_idx = root_bufs.content.result_indx &
726 ~ICE_AQ_RECIPE_RESULT_EN;
727 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
/* no result index programmed for this entry */
729 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
735 /* Only do the following for root recipes entries */
736 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
737 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
738 recps[idx].root_rid = root_bufs.content.rid &
739 ~ICE_AQ_RECIPE_ID_IS_ROOT;
740 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
743 /* Complete initialization of the root recipe entry */
744 lkup_exts->n_val_words = fv_word_idx;
745 recps[rid].big_recp = (num_recps > 1);
746 recps[rid].n_grp_count = num_recps;
747 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
748 ice_memdup(hw, tmp, recps[rid].n_grp_count *
749 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
750 if (!recps[rid].root_buf)
753 /* Copy result indexes */
754 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
755 recps[rid].recp_created = true;
763 * ice_get_recp_to_prof_map - updates recipe to profile mapping
764 * @hw: pointer to hardware structure
766 * This function is used to populate the recipe_to_profile matrix, where the
767 * index into the array is the recipe ID and the element is the bitmap of
768 * profiles this recipe is mapped to. It also rebuilds profile_to_recipe.
771 ice_get_recp_to_prof_map(struct ice_hw *hw)
773 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
776 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
779 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
780 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* on AQ failure for this profile, leave its map cleared */
781 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
783 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
784 ICE_MAX_NUM_RECIPES);
/* mirror each set bit into the inverse recipe_to_profile map */
785 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
786 if (ice_is_bit_set(r_bitmap, j))
787 ice_set_bit(i, recipe_to_profile[j]);
792 * ice_init_def_sw_recp - initialize the recipe book keeping tables
793 * @hw: pointer to the HW struct
794 * @recp_list: pointer to sw recipe list
796 * Allocate memory for the entire recipe table and initialize the structures/
797 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated.
800 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
802 struct ice_sw_recipe *recps;
805 recps = (struct ice_sw_recipe *)
806 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
808 return ICE_ERR_NO_MEMORY;
810 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
/* by default each recipe is its own root */
811 recps[i].root_rid = i;
812 INIT_LIST_HEAD(&recps[i].filt_rules);
813 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
814 INIT_LIST_HEAD(&recps[i].rg_list);
815 ice_init_lock(&recps[i].filt_rule_lock);
824 * ice_aq_get_sw_cfg - get switch configuration
825 * @hw: pointer to the hardware structure
826 * @buf: pointer to the result buffer
827 * @buf_size: length of the buffer available for response
828 * @req_desc: pointer to requested descriptor
829 * @num_elems: pointer to number of elements
830 * @cd: pointer to command details structure or NULL
832 * Get switch configuration (0x0200) to be placed in 'buf'.
833 * This admin command returns information such as initial VSI/port number
834 * and switch ID it belongs to.
836 * NOTE: *req_desc is both an input/output parameter.
837 * The caller of this function first calls this function with *req_desc set
838 * to 0. If the response from f/w has *req_desc set to 0, all the switch
839 * configuration information has been returned; if non-zero (meaning not all
840 * the information was returned), the caller should call this function again
841 * with *req_desc set to the previous value returned by f/w to get the
842 * next block of switch configuration information.
844 * *num_elems is an output only parameter. It reflects the number of elements
845 * in the response buffer. The caller of this function should use *num_elems
846 * while parsing the response buffer.
848 static enum ice_status
849 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
850 u16 buf_size, u16 *req_desc, u16 *num_elems,
851 struct ice_sq_cd *cd)
853 struct ice_aqc_get_sw_cfg *cmd;
854 enum ice_status status;
855 struct ice_aq_desc desc;
857 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
858 cmd = &desc.params.get_sw_conf;
859 cmd->element = CPU_TO_LE16(*req_desc);
861 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* echo back FW's continuation cookie and element count */
863 *req_desc = LE16_TO_CPU(cmd->element);
864 *num_elems = LE16_TO_CPU(cmd->num_elems);
871 * ice_alloc_sw - allocate resources specific to switch
872 * @hw: pointer to the HW struct
873 * @ena_stats: true to turn on VEB stats
874 * @shared_res: true for shared resource, false for dedicated resource
875 * @sw_id: switch ID returned
876 * @counter_id: VEB counter ID returned
878 * allocates switch resources (SWID and VEB counter) (0x0208)
881 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
884 struct ice_aqc_alloc_free_res_elem *sw_buf;
885 struct ice_aqc_res_elem *sw_ele;
886 enum ice_status status;
889 buf_len = sizeof(*sw_buf);
890 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
891 ice_malloc(hw, buf_len);
893 return ICE_ERR_NO_MEMORY;
895 /* Prepare buffer for switch ID.
896 * The number of resource entries in buffer is passed as 1 since only a
897 * single switch/VEB instance is allocated, and hence a single sw_id
 * is requested.
900 sw_buf->num_elems = CPU_TO_LE16(1);
902 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
903 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
904 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
906 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
907 ice_aqc_opc_alloc_res, NULL);
910 goto ice_alloc_sw_exit;
912 sw_ele = &sw_buf->elem[0];
913 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* NOTE(review): the VEB-counter section below is presumably gated on
 * ena_stats — confirm; the guard is not visible at this point.
 */
916 /* Prepare buffer for VEB Counter */
917 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
918 struct ice_aqc_alloc_free_res_elem *counter_buf;
919 struct ice_aqc_res_elem *counter_ele;
921 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
922 ice_malloc(hw, buf_len);
924 status = ICE_ERR_NO_MEMORY;
925 goto ice_alloc_sw_exit;
928 /* The number of resource entries in buffer is passed as 1 since
929 * only a single switch/VEB instance is allocated, and hence a
930 * single VEB counter is requested.
932 counter_buf->num_elems = CPU_TO_LE16(1);
933 counter_buf->res_type =
934 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
935 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
936 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
940 ice_free(hw, counter_buf);
941 goto ice_alloc_sw_exit;
943 counter_ele = &counter_buf->elem[0];
944 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
945 ice_free(hw, counter_buf);
949 ice_free(hw, sw_buf);
954 * ice_free_sw - free resources specific to switch
955 * @hw: pointer to the HW struct
956 * @sw_id: switch ID returned
957 * @counter_id: VEB counter ID returned
959 * free switch resources (SWID and VEB counter) (0x0209)
961 * NOTE: This function frees multiple resources. It continues
962 * releasing other resources even after it encounters error.
963 * The error code returned is the last error it encountered.
965 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
967 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
968 enum ice_status status, ret_status;
971 buf_len = sizeof(*sw_buf);
972 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
973 ice_malloc(hw, buf_len);
975 return ICE_ERR_NO_MEMORY;
977 /* Prepare buffer to free for switch ID res.
978 * The number of resource entries in buffer is passed as 1 since only a
979 * single switch/VEB instance is freed, and hence a single sw_id
 * is released.
982 sw_buf->num_elems = CPU_TO_LE16(1);
983 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
984 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
986 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
987 ice_aqc_opc_free_res, NULL);
990 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
992 /* Prepare buffer to free for VEB Counter resource */
993 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
994 ice_malloc(hw, buf_len);
996 ice_free(hw, sw_buf);
997 return ICE_ERR_NO_MEMORY;
1000 /* The number of resource entries in buffer is passed as 1 since only a
1001 * single switch/VEB instance is freed, and hence a single VEB counter
 * is released.
1004 counter_buf->num_elems = CPU_TO_LE16(1);
1005 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1006 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1008 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1009 ice_aqc_opc_free_res, NULL);
/* best-effort: record the failure but keep releasing resources */
1011 ice_debug(hw, ICE_DBG_SW,
1012 "VEB counter resource could not be freed\n");
1013 ret_status = status;
1016 ice_free(hw, counter_buf);
1017 ice_free(hw, sw_buf);
 * ice_aq_add_vsi - add a VSI context to the hardware
1023 * @hw: pointer to the HW struct
1024 * @vsi_ctx: pointer to a VSI context struct
1025 * @cd: pointer to command details structure or NULL
1027 * Add a VSI context to the hardware (0x0210)
1030 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1031 struct ice_sq_cd *cd)
1033 struct ice_aqc_add_update_free_vsi_resp *res;
1034 struct ice_aqc_add_get_update_free_vsi *cmd;
1035 struct ice_aq_desc desc;
1036 enum ice_status status;
1038 cmd = &desc.params.vsi_cmd;
1039 res = &desc.params.add_update_free_vsi_res;
1041 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* when caller supplies a specific VSI number, mark it valid in the cmd */
1043 if (!vsi_ctx->alloc_from_pool)
1044 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1045 ICE_AQ_VSI_IS_VALID);
1047 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1049 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1051 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1052 sizeof(vsi_ctx->info), cd);
/* on success, record FW-assigned VSI number and pool statistics */
1055 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1056 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1057 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
 * ice_aq_free_vsi - free a VSI context in the hardware
1065 * @hw: pointer to the HW struct
1066 * @vsi_ctx: pointer to a VSI context struct
1067 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1068 * @cd: pointer to command details structure or NULL
1070 * Free VSI context info from hardware (0x0213)
1073 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1074 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1076 struct ice_aqc_add_update_free_vsi_resp *resp;
1077 struct ice_aqc_add_get_update_free_vsi *cmd;
1078 struct ice_aq_desc desc;
1079 enum ice_status status;
1081 cmd = &desc.params.vsi_cmd;
1082 resp = &desc.params.add_update_free_vsi_res;
1084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1086 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1088 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1090 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* on success, record updated pool statistics from the response */
1092 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1093 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 * ice_aq_update_vsi - update a VSI context in the hardware
1101 * @hw: pointer to the HW struct
1102 * @vsi_ctx: pointer to a VSI context struct
1103 * @cd: pointer to command details structure or NULL
1105 * Update VSI context in the hardware (0x0211)
1108 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1109 struct ice_sq_cd *cd)
1111 struct ice_aqc_add_update_free_vsi_resp *resp;
1112 struct ice_aqc_add_get_update_free_vsi *cmd;
1113 struct ice_aq_desc desc;
1114 enum ice_status status;
1116 cmd = &desc.params.vsi_cmd;
1117 resp = &desc.params.add_update_free_vsi_res;
1119 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1121 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1123 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1125 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1126 sizeof(vsi_ctx->info), cd);
/* on success, record updated pool statistics from the response */
1129 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1130 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1137 * ice_is_vsi_valid - check whether the VSI is valid or not
1138 * @hw: pointer to the HW struct
1139 * @vsi_handle: VSI handle
1141 * Returns true only when the handle is in range AND a context has been
 * saved for it (via ice_save_vsi_ctx).
1143 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1145 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1149 * ice_get_hw_vsi_num - return the HW VSI number
1150 * @hw: pointer to the HW struct
1151 * @vsi_handle: VSI handle
1153 * return the HW VSI number
1154 * Caution: call this function only if VSI is valid (ice_is_vsi_valid),
 * as the context pointer is dereferenced without a NULL check.
1156 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1158 return hw->vsi_ctx[vsi_handle]->vsi_num;
1162 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1163 * @hw: pointer to the HW struct
1164 * @vsi_handle: VSI handle
1166 * return the VSI context entry for a given VSI handle
1168 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1170 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1174 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1175 * @hw: pointer to the HW struct
1176 * @vsi_handle: VSI handle
1177 * @vsi: VSI context pointer
1179 * save the VSI context entry for a given VSI handle
1182 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1184 hw->vsi_ctx[vsi_handle] = vsi;
1188 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1189 * @hw: pointer to the HW struct
1190 * @vsi_handle: VSI handle
1192 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1194 struct ice_vsi_ctx *vsi;
1197 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1200 ice_for_each_traffic_class(i) {
1201 if (vsi->lan_q_ctx[i]) {
1202 ice_free(hw, vsi->lan_q_ctx[i]);
1203 vsi->lan_q_ctx[i] = NULL;
1209 * ice_clear_vsi_ctx - clear the VSI context entry
1210 * @hw: pointer to the HW struct
1211 * @vsi_handle: VSI handle
1213 * clear the VSI context entry
1215 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1217 struct ice_vsi_ctx *vsi;
1219 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1221 ice_clear_vsi_q_ctx(hw, vsi_handle);
1223 hw->vsi_ctx[vsi_handle] = NULL;
1228 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1229 * @hw: pointer to the HW struct
1231 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1235 for (i = 0; i < ICE_MAX_VSI; i++)
1236 ice_clear_vsi_ctx(hw, i);
1240 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1241 * @hw: pointer to the HW struct
1242 * @vsi_handle: unique VSI handle provided by drivers
1243 * @vsi_ctx: pointer to a VSI context struct
1244 * @cd: pointer to command details structure or NULL
1246 * Add a VSI context to the hardware also add it into the VSI handle list.
1247 * If this function gets called after reset for existing VSIs then update
1248 * with the new HW VSI number in the corresponding VSI handle list entry.
1251 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1252 struct ice_sq_cd *cd)
1254 struct ice_vsi_ctx *tmp_vsi_ctx;
1255 enum ice_status status;
1257 if (vsi_handle >= ICE_MAX_VSI)
1258 return ICE_ERR_PARAM;
1259 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1262 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1264 /* Create a new VSI context */
1265 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1266 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1268 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1269 return ICE_ERR_NO_MEMORY;
1271 *tmp_vsi_ctx = *vsi_ctx;
1273 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1275 /* update with new HW VSI num */
1276 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1283 * ice_free_vsi- free VSI context from hardware and VSI handle list
1284 * @hw: pointer to the HW struct
1285 * @vsi_handle: unique VSI handle
1286 * @vsi_ctx: pointer to a VSI context struct
1287 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1288 * @cd: pointer to command details structure or NULL
1290 * Free VSI context info from hardware as well as from VSI handle list
1293 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1294 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1296 enum ice_status status;
1298 if (!ice_is_vsi_valid(hw, vsi_handle))
1299 return ICE_ERR_PARAM;
1300 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1301 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1303 ice_clear_vsi_ctx(hw, vsi_handle);
1309 * @hw: pointer to the HW struct
1310 * @vsi_handle: unique VSI handle
1311 * @vsi_ctx: pointer to a VSI context struct
1312 * @cd: pointer to command details structure or NULL
1314 * Update VSI context in the hardware
1317 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1318 struct ice_sq_cd *cd)
1320 if (!ice_is_vsi_valid(hw, vsi_handle))
1321 return ICE_ERR_PARAM;
1322 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1323 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1327 * ice_aq_get_vsi_params
1328 * @hw: pointer to the HW struct
1329 * @vsi_ctx: pointer to a VSI context struct
1330 * @cd: pointer to command details structure or NULL
1332 * Get VSI context info from hardware (0x0212)
1335 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1336 struct ice_sq_cd *cd)
1338 struct ice_aqc_add_get_update_free_vsi *cmd;
1339 struct ice_aqc_get_vsi_resp *resp;
1340 struct ice_aq_desc desc;
1341 enum ice_status status;
1343 cmd = &desc.params.vsi_cmd;
1344 resp = &desc.params.get_vsi_resp;
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1348 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1350 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1351 sizeof(vsi_ctx->info), cd);
1353 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1355 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1356 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1363 * ice_aq_add_update_mir_rule - add/update a mirror rule
1364 * @hw: pointer to the HW struct
1365 * @rule_type: Rule Type
1366 * @dest_vsi: VSI number to which packets will be mirrored
1367 * @count: length of the list
1368 * @mr_buf: buffer for list of mirrored VSI numbers
1369 * @cd: pointer to command details structure or NULL
1372 * Add/Update Mirror Rule (0x260).
1375 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1376 u16 count, struct ice_mir_rule_buf *mr_buf,
1377 struct ice_sq_cd *cd, u16 *rule_id)
1379 struct ice_aqc_add_update_mir_rule *cmd;
1380 struct ice_aq_desc desc;
1381 enum ice_status status;
1382 __le16 *mr_list = NULL;
1385 switch (rule_type) {
1386 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1387 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1388 /* Make sure count and mr_buf are set for these rule_types */
1389 if (!(count && mr_buf))
1390 return ICE_ERR_PARAM;
1392 buf_size = count * sizeof(__le16);
1393 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1395 return ICE_ERR_NO_MEMORY;
1397 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1398 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1399 /* Make sure count and mr_buf are not set for these
1402 if (count || mr_buf)
1403 return ICE_ERR_PARAM;
1406 ice_debug(hw, ICE_DBG_SW,
1407 "Error due to unsupported rule_type %u\n", rule_type);
1408 return ICE_ERR_OUT_OF_RANGE;
1411 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1413 /* Pre-process 'mr_buf' items for add/update of virtual port
1414 * ingress/egress mirroring (but not physical port ingress/egress
1420 for (i = 0; i < count; i++) {
1423 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1425 /* Validate specified VSI number, make sure it is less
1426 * than ICE_MAX_VSI, if not return with error.
1428 if (id >= ICE_MAX_VSI) {
1429 ice_debug(hw, ICE_DBG_SW,
1430 "Error VSI index (%u) out-of-range\n",
1432 ice_free(hw, mr_list);
1433 return ICE_ERR_OUT_OF_RANGE;
1436 /* add VSI to mirror rule */
1439 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1440 else /* remove VSI from mirror rule */
1441 mr_list[i] = CPU_TO_LE16(id);
1445 cmd = &desc.params.add_update_rule;
1446 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1447 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1448 ICE_AQC_RULE_ID_VALID_M);
1449 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1450 cmd->num_entries = CPU_TO_LE16(count);
1451 cmd->dest = CPU_TO_LE16(dest_vsi);
1453 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1455 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1457 ice_free(hw, mr_list);
1463 * ice_aq_delete_mir_rule - delete a mirror rule
1464 * @hw: pointer to the HW struct
1465 * @rule_id: Mirror rule ID (to be deleted)
1466 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1467 * otherwise it is returned to the shared pool
1468 * @cd: pointer to command details structure or NULL
1470 * Delete Mirror Rule (0x261).
1473 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1474 struct ice_sq_cd *cd)
1476 struct ice_aqc_delete_mir_rule *cmd;
1477 struct ice_aq_desc desc;
1479 /* rule_id should be in the range 0...63 */
1480 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1481 return ICE_ERR_OUT_OF_RANGE;
1483 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1485 cmd = &desc.params.del_rule;
1486 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1487 cmd->rule_id = CPU_TO_LE16(rule_id);
1490 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1492 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1496 * ice_aq_alloc_free_vsi_list
1497 * @hw: pointer to the HW struct
1498 * @vsi_list_id: VSI list ID returned or used for lookup
1499 * @lkup_type: switch rule filter lookup type
1500 * @opc: switch rules population command type - pass in the command opcode
1502 * allocates or free a VSI list resource
1504 static enum ice_status
1505 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1506 enum ice_sw_lkup_type lkup_type,
1507 enum ice_adminq_opc opc)
1509 struct ice_aqc_alloc_free_res_elem *sw_buf;
1510 struct ice_aqc_res_elem *vsi_ele;
1511 enum ice_status status;
1514 buf_len = sizeof(*sw_buf);
1515 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1516 ice_malloc(hw, buf_len);
1518 return ICE_ERR_NO_MEMORY;
1519 sw_buf->num_elems = CPU_TO_LE16(1);
1521 if (lkup_type == ICE_SW_LKUP_MAC ||
1522 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1523 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1524 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1525 lkup_type == ICE_SW_LKUP_PROMISC ||
1526 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1527 lkup_type == ICE_SW_LKUP_LAST) {
1528 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1529 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1531 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1533 status = ICE_ERR_PARAM;
1534 goto ice_aq_alloc_free_vsi_list_exit;
1537 if (opc == ice_aqc_opc_free_res)
1538 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1540 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1542 goto ice_aq_alloc_free_vsi_list_exit;
1544 if (opc == ice_aqc_opc_alloc_res) {
1545 vsi_ele = &sw_buf->elem[0];
1546 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1549 ice_aq_alloc_free_vsi_list_exit:
1550 ice_free(hw, sw_buf);
1555 * ice_aq_set_storm_ctrl - Sets storm control configuration
1556 * @hw: pointer to the HW struct
1557 * @bcast_thresh: represents the upper threshold for broadcast storm control
1558 * @mcast_thresh: represents the upper threshold for multicast storm control
1559 * @ctl_bitmask: storm control control knobs
1561 * Sets the storm control configuration (0x0280)
1564 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1567 struct ice_aqc_storm_cfg *cmd;
1568 struct ice_aq_desc desc;
1570 cmd = &desc.params.storm_conf;
1572 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1574 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1575 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1576 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1578 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1582 * ice_aq_get_storm_ctrl - gets storm control configuration
1583 * @hw: pointer to the HW struct
1584 * @bcast_thresh: represents the upper threshold for broadcast storm control
1585 * @mcast_thresh: represents the upper threshold for multicast storm control
1586 * @ctl_bitmask: storm control control knobs
1588 * Gets the storm control configuration (0x0281)
1591 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1594 enum ice_status status;
1595 struct ice_aq_desc desc;
1597 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1599 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1601 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1604 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1607 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1610 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1617 * ice_aq_sw_rules - add/update/remove switch rules
1618 * @hw: pointer to the HW struct
1619 * @rule_list: pointer to switch rule population list
1620 * @rule_list_sz: total size of the rule list in bytes
1621 * @num_rules: number of switch rules in the rule_list
1622 * @opc: switch rules population command type - pass in the command opcode
1623 * @cd: pointer to command details structure or NULL
1625 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1627 static enum ice_status
1628 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1629 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1631 struct ice_aq_desc desc;
1633 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1635 if (opc != ice_aqc_opc_add_sw_rules &&
1636 opc != ice_aqc_opc_update_sw_rules &&
1637 opc != ice_aqc_opc_remove_sw_rules)
1638 return ICE_ERR_PARAM;
1640 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1642 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1643 desc.params.sw_rules.num_rules_fltr_entry_index =
1644 CPU_TO_LE16(num_rules);
1645 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1649 * ice_aq_add_recipe - add switch recipe
1650 * @hw: pointer to the HW struct
1651 * @s_recipe_list: pointer to switch rule population list
1652 * @num_recipes: number of switch recipes in the list
1653 * @cd: pointer to command details structure or NULL
1658 ice_aq_add_recipe(struct ice_hw *hw,
1659 struct ice_aqc_recipe_data_elem *s_recipe_list,
1660 u16 num_recipes, struct ice_sq_cd *cd)
1662 struct ice_aqc_add_get_recipe *cmd;
1663 struct ice_aq_desc desc;
1666 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1667 cmd = &desc.params.add_get_recipe;
1668 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1670 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1671 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1673 buf_size = num_recipes * sizeof(*s_recipe_list);
1675 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1679 * ice_aq_get_recipe - get switch recipe
1680 * @hw: pointer to the HW struct
1681 * @s_recipe_list: pointer to switch rule population list
1682 * @num_recipes: pointer to the number of recipes (input and output)
1683 * @recipe_root: root recipe number of recipe(s) to retrieve
1684 * @cd: pointer to command details structure or NULL
1688 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1689 * On output, *num_recipes will equal the number of entries returned in
1692 * The caller must supply enough space in s_recipe_list to hold all possible
1693 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1696 ice_aq_get_recipe(struct ice_hw *hw,
1697 struct ice_aqc_recipe_data_elem *s_recipe_list,
1698 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1700 struct ice_aqc_add_get_recipe *cmd;
1701 struct ice_aq_desc desc;
1702 enum ice_status status;
1705 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1706 return ICE_ERR_PARAM;
1708 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1709 cmd = &desc.params.add_get_recipe;
1710 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1712 cmd->return_index = CPU_TO_LE16(recipe_root);
1713 cmd->num_sub_recipes = 0;
1715 buf_size = *num_recipes * sizeof(*s_recipe_list);
1717 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1718 /* cppcheck-suppress constArgument */
1719 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1725 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1726 * @hw: pointer to the HW struct
1727 * @profile_id: package profile ID to associate the recipe with
1728 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1729 * @cd: pointer to command details structure or NULL
1730 * Recipe to profile association (0x0291)
1733 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1734 struct ice_sq_cd *cd)
1736 struct ice_aqc_recipe_to_profile *cmd;
1737 struct ice_aq_desc desc;
1739 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1740 cmd = &desc.params.recipe_to_profile;
1741 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1742 cmd->profile_id = CPU_TO_LE16(profile_id);
1743 /* Set the recipe ID bit in the bitmask to let the device know which
1744 * profile we are associating the recipe to
1746 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1747 ICE_NONDMA_TO_NONDMA);
1749 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1753 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1754 * @hw: pointer to the HW struct
1755 * @profile_id: package profile ID to associate the recipe with
1756 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1757 * @cd: pointer to command details structure or NULL
1758 * Associate profile ID with given recipe (0x0293)
1761 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1762 struct ice_sq_cd *cd)
1764 struct ice_aqc_recipe_to_profile *cmd;
1765 struct ice_aq_desc desc;
1766 enum ice_status status;
1768 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1769 cmd = &desc.params.recipe_to_profile;
1770 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1771 cmd->profile_id = CPU_TO_LE16(profile_id);
1773 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1775 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1776 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1782 * ice_alloc_recipe - add recipe resource
1783 * @hw: pointer to the hardware structure
1784 * @rid: recipe ID returned as response to AQ call
1786 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1788 struct ice_aqc_alloc_free_res_elem *sw_buf;
1789 enum ice_status status;
1792 buf_len = sizeof(*sw_buf);
1793 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1795 return ICE_ERR_NO_MEMORY;
1797 sw_buf->num_elems = CPU_TO_LE16(1);
1798 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1799 ICE_AQC_RES_TYPE_S) |
1800 ICE_AQC_RES_TYPE_FLAG_SHARED);
1801 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802 ice_aqc_opc_alloc_res, NULL);
1804 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1805 ice_free(hw, sw_buf);
1810 /* ice_init_port_info - Initialize port_info with switch configuration data
1811 * @pi: pointer to port_info
1812 * @vsi_port_num: VSI number or port number
1813 * @type: Type of switch element (port or VSI)
1814 * @swid: switch ID of the switch the element is attached to
1815 * @pf_vf_num: PF or VF number
1816 * @is_vf: true if the element is a VF, false otherwise
1819 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1820 u16 swid, u16 pf_vf_num, bool is_vf)
1823 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1824 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1826 pi->pf_vf_num = pf_vf_num;
1828 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1829 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1832 ice_debug(pi->hw, ICE_DBG_SW,
1833 "incorrect VSI/port type received\n");
1838 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1839 * @hw: pointer to the hardware structure
1841 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1843 struct ice_aqc_get_sw_cfg_resp *rbuf;
1844 enum ice_status status;
1845 u16 num_total_ports;
1851 num_total_ports = 1;
1853 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1854 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1857 return ICE_ERR_NO_MEMORY;
1859 /* Multiple calls to ice_aq_get_sw_cfg may be required
1860 * to get all the switch configuration information. The need
1861 * for additional calls is indicated by ice_aq_get_sw_cfg
1862 * writing a non-zero value in req_desc
1865 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1866 &req_desc, &num_elems, NULL);
1871 for (i = 0; i < num_elems; i++) {
1872 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1873 u16 pf_vf_num, swid, vsi_port_num;
1877 ele = rbuf[i].elements;
1878 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1879 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1881 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1882 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1884 swid = LE16_TO_CPU(ele->swid);
1886 if (LE16_TO_CPU(ele->pf_vf_num) &
1887 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1890 type = LE16_TO_CPU(ele->vsi_port_num) >>
1891 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1894 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1895 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1896 if (j == num_total_ports) {
1897 ice_debug(hw, ICE_DBG_SW,
1898 "more ports than expected\n");
1899 status = ICE_ERR_CFG;
1902 ice_init_port_info(hw->port_info,
1903 vsi_port_num, type, swid,
1911 } while (req_desc && !status);
1914 ice_free(hw, (void *)rbuf);
1919 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1920 * @hw: pointer to the hardware structure
1921 * @fi: filter info structure to fill/update
1923 * This helper function populates the lb_en and lan_en elements of the provided
1924 * ice_fltr_info struct using the switch's type and characteristics of the
1925 * switch rule being configured.
1927 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1931 if ((fi->flag & ICE_FLTR_TX) &&
1932 (fi->fltr_act == ICE_FWD_TO_VSI ||
1933 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1934 fi->fltr_act == ICE_FWD_TO_Q ||
1935 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1936 /* Setting LB for prune actions will result in replicated
1937 * packets to the internal switch that will be dropped.
1939 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1942 /* Set lan_en to TRUE if
1943 * 1. The switch is a VEB AND
1945 * 2.1 The lookup is a directional lookup like ethertype,
1946 * promiscuous, ethertype-MAC, promiscuous-VLAN
1947 * and default-port OR
1948 * 2.2 The lookup is VLAN, OR
1949 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1950 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1954 * The switch is a VEPA.
1956 * In all other cases, the LAN enable has to be set to false.
1959 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1960 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1961 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1962 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1963 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1964 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1965 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1966 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1967 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1968 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1977 * ice_fill_sw_rule - Helper function to fill switch rule structure
1978 * @hw: pointer to the hardware structure
1979 * @f_info: entry containing packet forwarding information
1980 * @s_rule: switch rule structure to be filled in based on mac_entry
1981 * @opc: switch rules population command type - pass in the command opcode
1984 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1985 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1987 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1995 if (opc == ice_aqc_opc_remove_sw_rules) {
1996 s_rule->pdata.lkup_tx_rx.act = 0;
1997 s_rule->pdata.lkup_tx_rx.index =
1998 CPU_TO_LE16(f_info->fltr_rule_id);
1999 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2003 eth_hdr_sz = sizeof(dummy_eth_header);
2004 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2006 /* initialize the ether header with a dummy header */
2007 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2008 ice_fill_sw_info(hw, f_info);
2010 switch (f_info->fltr_act) {
2011 case ICE_FWD_TO_VSI:
2012 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2013 ICE_SINGLE_ACT_VSI_ID_M;
2014 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2015 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2016 ICE_SINGLE_ACT_VALID_BIT;
2018 case ICE_FWD_TO_VSI_LIST:
2019 act |= ICE_SINGLE_ACT_VSI_LIST;
2020 act |= (f_info->fwd_id.vsi_list_id <<
2021 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2022 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2023 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2024 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2025 ICE_SINGLE_ACT_VALID_BIT;
2028 act |= ICE_SINGLE_ACT_TO_Q;
2029 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2030 ICE_SINGLE_ACT_Q_INDEX_M;
2032 case ICE_DROP_PACKET:
2033 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2034 ICE_SINGLE_ACT_VALID_BIT;
2036 case ICE_FWD_TO_QGRP:
2037 q_rgn = f_info->qgrp_size > 0 ?
2038 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2039 act |= ICE_SINGLE_ACT_TO_Q;
2040 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2041 ICE_SINGLE_ACT_Q_INDEX_M;
2042 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2043 ICE_SINGLE_ACT_Q_REGION_M;
2050 act |= ICE_SINGLE_ACT_LB_ENABLE;
2052 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2054 switch (f_info->lkup_type) {
2055 case ICE_SW_LKUP_MAC:
2056 daddr = f_info->l_data.mac.mac_addr;
2058 case ICE_SW_LKUP_VLAN:
2059 vlan_id = f_info->l_data.vlan.vlan_id;
2060 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2061 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2062 act |= ICE_SINGLE_ACT_PRUNE;
2063 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2066 case ICE_SW_LKUP_ETHERTYPE_MAC:
2067 daddr = f_info->l_data.ethertype_mac.mac_addr;
2069 case ICE_SW_LKUP_ETHERTYPE:
2070 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2071 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2073 case ICE_SW_LKUP_MAC_VLAN:
2074 daddr = f_info->l_data.mac_vlan.mac_addr;
2075 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2077 case ICE_SW_LKUP_PROMISC_VLAN:
2078 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2080 case ICE_SW_LKUP_PROMISC:
2081 daddr = f_info->l_data.mac_vlan.mac_addr;
2087 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2088 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2089 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2091 /* Recipe set depending on lookup type */
2092 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2093 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2094 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2097 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2098 ICE_NONDMA_TO_NONDMA);
2100 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2101 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2102 *off = CPU_TO_BE16(vlan_id);
2105 /* Create the switch rule with the final dummy Ethernet header */
2106 if (opc != ice_aqc_opc_update_sw_rules)
2107 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2111 * ice_add_marker_act
2112 * @hw: pointer to the hardware structure
2113 * @m_ent: the management entry for which sw marker needs to be added
2114 * @sw_marker: sw marker to tag the Rx descriptor with
2115 * @l_id: large action resource ID
2117 * Create a large action to hold software marker and update the switch rule
2118 * entry pointed by m_ent with newly created large action
2120 static enum ice_status
2121 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2122 u16 sw_marker, u16 l_id)
2124 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2125 /* For software marker we need 3 large actions
2126 * 1. FWD action: FWD TO VSI or VSI LIST
2127 * 2. GENERIC VALUE action to hold the profile ID
2128 * 3. GENERIC VALUE action to hold the software marker ID
2130 const u16 num_lg_acts = 3;
2131 enum ice_status status;
2137 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2138 return ICE_ERR_PARAM;
2140 /* Create two back-to-back switch rules and submit them to the HW using
2141 * one memory buffer:
2145 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2146 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2147 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2149 return ICE_ERR_NO_MEMORY;
2151 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2153 /* Fill in the first switch rule i.e. large action */
2154 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2155 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2156 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2158 /* First action VSI forwarding or VSI list forwarding depending on how
2161 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2162 m_ent->fltr_info.fwd_id.hw_vsi_id;
2164 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2165 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2166 ICE_LG_ACT_VSI_LIST_ID_M;
2167 if (m_ent->vsi_count > 1)
2168 act |= ICE_LG_ACT_VSI_LIST;
2169 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2171 /* Second action descriptor type */
2172 act = ICE_LG_ACT_GENERIC;
2174 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2175 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2177 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2178 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2180 /* Third action Marker value */
2181 act |= ICE_LG_ACT_GENERIC;
2182 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2183 ICE_LG_ACT_GENERIC_VALUE_M;
2185 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2187 /* call the fill switch rule to fill the lookup Tx Rx structure */
2188 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2189 ice_aqc_opc_update_sw_rules);
2191 /* Update the action to point to the large action ID */
2192 rx_tx->pdata.lkup_tx_rx.act =
2193 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2194 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2195 ICE_SINGLE_ACT_PTR_VAL_M));
2197 /* Use the filter rule ID of the previously created rule with single
2198 * act. Once the update happens, hardware will treat this as large
2201 rx_tx->pdata.lkup_tx_rx.index =
2202 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2204 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2205 ice_aqc_opc_update_sw_rules, NULL);
2207 m_ent->lg_act_idx = l_id;
2208 m_ent->sw_marker_id = sw_marker;
2211 ice_free(hw, lg_act);
2216 * ice_add_counter_act - add/update filter rule with counter action
2217 * @hw: pointer to the hardware structure
2218 * @m_ent: the management entry for which counter needs to be added
2219 * @counter_id: VLAN counter ID returned as part of allocate resource
2220 * @l_id: large action resource ID
/* NOTE(review): this excerpt is elided — local declarations (act, id,
 * lg_act_size, rules_size, f_rule_id), braces and some error checks are
 * not visible here; verify against the full source before editing.
 */
2222 static enum ice_status
2223 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2224 u16 counter_id, u16 l_id)
2226 struct ice_aqc_sw_rules_elem *lg_act;
2227 struct ice_aqc_sw_rules_elem *rx_tx;
2228 enum ice_status status;
2229 /* 2 actions will be added while adding a large action counter */
2230 const int num_acts = 2;
/* Counter actions are only supported on MAC lookup rules */
2237 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2238 return ICE_ERR_PARAM;
2240 /* Create two back-to-back switch rules and submit them to the HW using
2241 * one memory buffer:
2245 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2246 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2247 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2250 return ICE_ERR_NO_MEMORY;
/* The Rx/Tx lookup rule element lives directly after the large action
 * element in the same buffer
 */
2252 rx_tx = (struct ice_aqc_sw_rules_elem *)
2253 ((u8 *)lg_act + lg_act_size);
2255 /* Fill in the first switch rule i.e. large action */
2256 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2257 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2258 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2260 /* First action VSI forwarding or VSI list forwarding depending on how
2263 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2264 m_ent->fltr_info.fwd_id.hw_vsi_id;
2266 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2267 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2268 ICE_LG_ACT_VSI_LIST_ID_M;
2269 if (m_ent->vsi_count > 1)
2270 act |= ICE_LG_ACT_VSI_LIST;
2271 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2273 /* Second action counter ID */
2274 act = ICE_LG_ACT_STAT_COUNT;
2275 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2276 ICE_LG_ACT_STAT_COUNT_M;
2277 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2279 /* call the fill switch rule to fill the lookup Tx Rx structure */
2280 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2281 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule at the large action just built */
2283 act = ICE_SINGLE_ACT_PTR;
2284 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2285 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2287 /* Use the filter rule ID of the previously created rule with single
2288 * act. Once the update happens, hardware will treat this as large
2291 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2292 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
2294 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2295 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keep the large action index and counter in the mgmt entry */
2297 m_ent->lg_act_idx = l_id;
2298 m_ent->counter_index = counter_id;
2301 ice_free(hw, lg_act);
2306 * ice_create_vsi_list_map
2307 * @hw: pointer to the hardware structure
2308 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2309 * @num_vsi: number of VSI handles in the array
2310 * @vsi_list_id: VSI list ID generated as part of allocate resource
2312 * Helper function to create a new entry of VSI list ID to VSI mapping
2313 * using the given VSI list ID
/* NOTE(review): excerpt is elided — the NULL check after ice_calloc and
 * the final return of v_map are not visible here.
 */
2315 static struct ice_vsi_list_map_info *
2316 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2319 struct ice_switch_info *sw = hw->switch_info;
2320 struct ice_vsi_list_map_info *v_map;
2323 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2328 v_map->vsi_list_id = vsi_list_id;
/* Record each VSI handle as a bit in the map */
2330 for (i = 0; i < num_vsi; i++)
2331 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new mapping on the switch-wide list */
2333 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2338 * ice_update_vsi_list_rule
2339 * @hw: pointer to the hardware structure
2340 * @vsi_handle_arr: array of VSI handles to form a VSI list
2341 * @num_vsi: number of VSI handles in the array
2342 * @vsi_list_id: VSI list ID generated as part of allocate resource
2343 * @remove: Boolean value to indicate if this is a remove action
2344 * @opc: switch rules population command type - pass in the command opcode
2345 * @lkup_type: lookup type of the filter
2347 * Call AQ command to add a new switch rule or update existing switch rule
2348 * using the given VSI list ID
/* NOTE(review): excerpt is elided — local declarations (type, i,
 * s_rule_size), the num_vsi guard, and loop braces are not visible.
 */
2350 static enum ice_status
2351 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2352 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2353 enum ice_sw_lkup_type lkup_type)
2355 struct ice_aqc_sw_rules_elem *s_rule;
2356 enum ice_status status;
2362 return ICE_ERR_PARAM;
/* Pick the rule type: VSI list set/clear for most lookups, prune list
 * set/clear for VLAN; anything else is a parameter error.
 */
2364 if (lkup_type == ICE_SW_LKUP_MAC ||
2365 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2366 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2367 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2368 lkup_type == ICE_SW_LKUP_PROMISC ||
2369 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2370 lkup_type == ICE_SW_LKUP_LAST)
2371 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2372 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2373 else if (lkup_type == ICE_SW_LKUP_VLAN)
2374 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2375 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2377 return ICE_ERR_PARAM;
2379 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2380 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2382 return ICE_ERR_NO_MEMORY;
/* Validate every handle and translate it to a HW VSI number */
2383 for (i = 0; i < num_vsi; i++) {
2384 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2385 status = ICE_ERR_PARAM;
2388 /* AQ call requires hw_vsi_id(s) */
2389 s_rule->pdata.vsi_list.vsi[i] =
2390 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2393 s_rule->type = CPU_TO_LE16(type);
2394 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2395 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2397 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2400 ice_free(hw, s_rule);
2405 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2406 * @hw: pointer to the HW struct
2407 * @vsi_handle_arr: array of VSI handles to form a VSI list
2408 * @num_vsi: number of VSI handles in the array
2409 * @vsi_list_id: stores the ID of the VSI list to be created
2410 * @lkup_type: switch rule filter's lookup type
/* Allocates a VSI list resource from FW, then populates it with the
 * given VSI handles via an add-switch-rules AQ command.
 * NOTE(review): the status check between the two calls is elided here.
 */
2412 static enum ice_status
2413 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2414 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2416 enum ice_status status;
2418 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2419 ice_aqc_opc_alloc_res);
2423 /* Update the newly created VSI list to include the specified VSIs */
2424 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2425 *vsi_list_id, false,
2426 ice_aqc_opc_add_sw_rules, lkup_type);
2430 * ice_create_pkt_fwd_rule
2431 * @hw: pointer to the hardware structure
2432 * @recp_list: corresponding filter management list
2433 * @f_entry: entry containing packet forwarding information
2435 * Create switch rule with given filter information and add an entry
2436 * to the corresponding filter management list to track this switch rule
/* NOTE(review): excerpt is elided — NULL checks after both allocations
 * and a status check after ice_aq_sw_rules are not visible here.
 */
2439 static enum ice_status
2440 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2441 struct ice_fltr_list_entry *f_entry)
2443 struct ice_fltr_mgmt_list_entry *fm_entry;
2444 struct ice_aqc_sw_rules_elem *s_rule;
2445 enum ice_status status;
2447 s_rule = (struct ice_aqc_sw_rules_elem *)
2448 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2450 return ICE_ERR_NO_MEMORY;
2451 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2452 ice_malloc(hw, sizeof(*fm_entry));
2454 status = ICE_ERR_NO_MEMORY;
2455 goto ice_create_pkt_fwd_rule_exit;
2458 fm_entry->fltr_info = f_entry->fltr_info;
2460 /* Initialize all the fields for the management entry */
2461 fm_entry->vsi_count = 1;
2462 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2463 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2464 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2466 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2467 ice_aqc_opc_add_sw_rules);
2469 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2470 ice_aqc_opc_add_sw_rules, NULL);
2472 ice_free(hw, fm_entry);
2473 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the FW-assigned rule ID to both the caller's entry and the
 * book-keeping entry
 */
2476 f_entry->fltr_info.fltr_rule_id =
2477 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2478 fm_entry->fltr_info.fltr_rule_id =
2479 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2481 /* The book keeping entries will get removed when base driver
2482 * calls remove filter AQ command
2484 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2486 ice_create_pkt_fwd_rule_exit:
2487 ice_free(hw, s_rule);
2492 * ice_update_pkt_fwd_rule
2493 * @hw: pointer to the hardware structure
2494 * @f_info: filter information for switch rule
2496 * Call AQ command to update a previously created switch rule with a
/* Rebuilds the lookup Rx/Tx rule from f_info and re-submits it with the
 * update opcode, keeping the existing FW rule ID (fltr_rule_id).
 * NOTE(review): the NULL check after ice_malloc is elided here.
 */
2499 static enum ice_status
2500 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2502 struct ice_aqc_sw_rules_elem *s_rule;
2503 enum ice_status status;
2505 s_rule = (struct ice_aqc_sw_rules_elem *)
2506 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2508 return ICE_ERR_NO_MEMORY;
2510 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2512 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2514 /* Update switch rule with new rule set to forward VSI list */
2515 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2516 ice_aqc_opc_update_sw_rules, NULL);
2518 ice_free(hw, s_rule);
2523 * ice_update_sw_rule_bridge_mode
2524 * @hw: pointer to the HW struct
2526 * Updates unicast switch filter rules based on VEB/VEPA mode
2528 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2530 struct ice_switch_info *sw = hw->switch_info;
2531 struct ice_fltr_mgmt_list_entry *fm_entry;
2532 enum ice_status status = ICE_SUCCESS;
2533 struct LIST_HEAD_TYPE *rule_head;
2534 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC lookup rules are walked; lock guards the shared rule list */
2536 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2537 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2539 ice_acquire_lock(rule_lock);
2540 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2542 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2543 u8 *addr = fi->l_data.mac.mac_addr;
2545 /* Update unicast Tx rules to reflect the selected
2548 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2549 (fi->fltr_act == ICE_FWD_TO_VSI ||
2550 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2551 fi->fltr_act == ICE_FWD_TO_Q ||
2552 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2553 status = ice_update_pkt_fwd_rule(hw, fi);
2559 ice_release_lock(rule_lock);
2565 * ice_add_update_vsi_list
2566 * @hw: pointer to the hardware structure
2567 * @m_entry: pointer to current filter management list entry
2568 * @cur_fltr: filter information from the book keeping entry
2569 * @new_fltr: filter information with the new VSI to be added
2571 * Call AQ command to add or update previously created VSI list with new VSI.
2573 * Helper function to do book keeping associated with adding filter information
2574 * The algorithm to do the book keeping is described below :
2575 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2576 * if only one VSI has been added till now
2577 * Allocate a new VSI list and add two VSIs
2578 * to this list using switch rule command
2579 * Update the previously created switch rule with the
2580 * newly created VSI list ID
2581 * if a VSI list was previously created
2582 * Add the new VSI to the previously created VSI list set
2583 * using the update switch rule command
/* NOTE(review): excerpt is elided — several status checks, braces and an
 * else keyword between the two branches are not visible here.
 */
2585 static enum ice_status
2586 ice_add_update_vsi_list(struct ice_hw *hw,
2587 struct ice_fltr_mgmt_list_entry *m_entry,
2588 struct ice_fltr_info *cur_fltr,
2589 struct ice_fltr_info *new_fltr)
2591 enum ice_status status = ICE_SUCCESS;
2592 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot be aggregated into VSI lists */
2594 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2595 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2596 return ICE_ERR_NOT_IMPL;
2598 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2599 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2600 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2601 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2602 return ICE_ERR_NOT_IMPL;
2604 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2605 /* Only one entry existed in the mapping and it was not already
2606 * a part of a VSI list. So, create a VSI list with the old and
2609 struct ice_fltr_info tmp_fltr;
2610 u16 vsi_handle_arr[2];
2612 /* A rule already exists with the new VSI being added */
2613 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2614 return ICE_ERR_ALREADY_EXISTS;
2616 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2617 vsi_handle_arr[1] = new_fltr->vsi_handle;
2618 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2620 new_fltr->lkup_type);
2624 tmp_fltr = *new_fltr;
2625 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2626 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2627 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2628 /* Update the previous switch rule of "MAC forward to VSI" to
2629 * "MAC fwd to VSI list"
2631 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the book-keeping entry over to the new VSI list */
2635 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2636 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2637 m_entry->vsi_list_info =
2638 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2641 /* If this entry was large action then the large action needs
2642 * to be updated to point to FWD to VSI list
2644 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2646 ice_add_marker_act(hw, m_entry,
2647 m_entry->sw_marker_id,
2648 m_entry->lg_act_idx);
2650 u16 vsi_handle = new_fltr->vsi_handle;
2651 enum ice_adminq_opc opcode;
2653 if (!m_entry->vsi_list_info)
2656 /* A rule already exists with the new VSI being added */
2657 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2660 /* Update the previously created VSI list set with
2661 * the new VSI ID passed in
2663 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2664 opcode = ice_aqc_opc_update_sw_rules;
2666 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2667 vsi_list_id, false, opcode,
2668 new_fltr->lkup_type);
2669 /* update VSI list mapping info with new VSI ID */
2671 ice_set_bit(vsi_handle,
2672 m_entry->vsi_list_info->vsi_map);
2675 m_entry->vsi_count++;
2680 * ice_find_rule_entry - Search a rule entry
2681 * @list_head: head of rule list
2682 * @f_info: rule information
2684 * Helper function to search for a given rule entry
2685 * Returns pointer to entry storing the rule if found
/* Match criterion: identical lookup data (l_data memcmp) AND identical
 * flag field. NOTE(review): the ret assignment/break and final return
 * are elided from this excerpt.
 */
2687 static struct ice_fltr_mgmt_list_entry *
2688 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2689 struct ice_fltr_info *f_info)
2691 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2693 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2695 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2696 sizeof(f_info->l_data)) &&
2697 f_info->flag == list_itr->fltr_info.flag) {
2706 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2707 * @hw: pointer to the hardware structure
2708 * @recp_id: lookup type for which VSI lists needs to be searched
2709 * @vsi_handle: VSI handle to be found in VSI list
2710 * @vsi_list_id: VSI list ID found containing vsi_handle
2712 * Helper function to search a VSI list with single entry containing given VSI
2713 * handle element. This can be extended further to search VSI list with more
2714 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Two traversal paths: advanced-rule lists vs. regular filter lists,
 * chosen by the recipe's adv_rule flag. Only the regular path enforces
 * vsi_count == 1.
 */
2716 static struct ice_vsi_list_map_info *
2717 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2720 struct ice_vsi_list_map_info *map_info = NULL;
2721 struct ice_switch_info *sw = hw->switch_info;
2722 struct LIST_HEAD_TYPE *list_head;
2724 list_head = &sw->recp_list[recp_id].filt_rules;
2725 if (sw->recp_list[recp_id].adv_rule) {
2726 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2728 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2729 ice_adv_fltr_mgmt_list_entry,
2731 if (list_itr->vsi_list_info) {
2732 map_info = list_itr->vsi_list_info;
2733 if (ice_is_bit_set(map_info->vsi_map,
2735 *vsi_list_id = map_info->vsi_list_id;
2741 struct ice_fltr_mgmt_list_entry *list_itr;
2743 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2744 ice_fltr_mgmt_list_entry,
2746 if (list_itr->vsi_count == 1 &&
2747 list_itr->vsi_list_info) {
2748 map_info = list_itr->vsi_list_info;
2749 if (ice_is_bit_set(map_info->vsi_map,
2751 *vsi_list_id = map_info->vsi_list_id;
2761 * ice_add_rule_internal - add rule for a given lookup type
2762 * @hw: pointer to the hardware structure
2763 * @recp_list: recipe list for which rule has to be added
2764 * @lport: logic port number on which function add rule
2765 * @f_entry: structure containing MAC forwarding information
2767 * Adds or updates the rule lists for a given recipe
/* If no matching rule exists a fresh forwarding rule is created;
 * otherwise the new VSI is folded into the existing rule's VSI list.
 */
2769 static enum ice_status
2770 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2771 u8 lport, struct ice_fltr_list_entry *f_entry)
2773 struct ice_fltr_info *new_fltr, *cur_fltr;
2774 struct ice_fltr_mgmt_list_entry *m_entry;
2775 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2776 enum ice_status status = ICE_SUCCESS;
2778 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2779 return ICE_ERR_PARAM;
2781 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2782 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2783 f_entry->fltr_info.fwd_id.hw_vsi_id =
2784 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2786 rule_lock = &recp_list->filt_rule_lock;
2788 ice_acquire_lock(rule_lock);
2789 new_fltr = &f_entry->fltr_info;
/* Source is the port for Rx rules, the HW VSI number for Tx rules */
2790 if (new_fltr->flag & ICE_FLTR_RX)
2791 new_fltr->src = lport;
2792 else if (new_fltr->flag & ICE_FLTR_TX)
2794 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2796 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2798 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2799 goto exit_add_rule_internal;
2802 cur_fltr = &m_entry->fltr_info;
2803 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2805 exit_add_rule_internal:
2806 ice_release_lock(rule_lock);
2811 * ice_remove_vsi_list_rule
2812 * @hw: pointer to the hardware structure
2813 * @vsi_list_id: VSI list ID generated as part of allocate resource
2814 * @lkup_type: switch rule filter lookup type
2816 * The VSI list should be emptied before this function is called to remove the
/* NOTE(review): excerpt is elided — the s_rule_size declaration, NULL
 * check and final return are not visible here.
 */
2819 static enum ice_status
2820 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2821 enum ice_sw_lkup_type lkup_type)
2823 struct ice_aqc_sw_rules_elem *s_rule;
2824 enum ice_status status;
2827 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2828 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2830 return ICE_ERR_NO_MEMORY;
2832 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2833 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2835 /* Free the vsi_list resource that we allocated. It is assumed that the
2836 * list is empty at this point.
2838 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2839 ice_aqc_opc_free_res);
2841 ice_free(hw, s_rule);
2846 * ice_rem_update_vsi_list
2847 * @hw: pointer to the hardware structure
2848 * @vsi_handle: VSI handle of the VSI to remove
2849 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from a fwd-to-VSI-list rule. When only one VSI
 * remains (non-VLAN lookups) the rule is converted back to plain
 * fwd-to-VSI and the now-unused VSI list is freed.
 * NOTE(review): excerpt is elided — some declarations (vsi_list_id,
 * rem_vsi_handle), braces and status checks are not visible here.
 */
2852 static enum ice_status
2853 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2854 struct ice_fltr_mgmt_list_entry *fm_list)
2856 enum ice_sw_lkup_type lkup_type;
2857 enum ice_status status = ICE_SUCCESS;
2860 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2861 fm_list->vsi_count == 0)
2862 return ICE_ERR_PARAM;
2864 /* A rule with the VSI being removed does not exist */
2865 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2866 return ICE_ERR_DOES_NOT_EXIST;
2868 lkup_type = fm_list->fltr_info.lkup_type;
2869 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2870 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2871 ice_aqc_opc_update_sw_rules,
/* Keep the local book-keeping in sync with the HW update above */
2876 fm_list->vsi_count--;
2877 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2879 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2880 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2881 struct ice_vsi_list_map_info *vsi_list_info =
2882 fm_list->vsi_list_info;
/* The single surviving VSI becomes the direct forwarding target */
2885 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2887 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2888 return ICE_ERR_OUT_OF_RANGE;
2890 /* Make sure VSI list is empty before removing it below */
2891 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2893 ice_aqc_opc_update_sw_rules,
2898 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2899 tmp_fltr_info.fwd_id.hw_vsi_id =
2900 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2901 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2902 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2904 ice_debug(hw, ICE_DBG_SW,
2905 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2906 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2910 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are kept until empty; others until one VSI remains */
2913 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2914 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2915 struct ice_vsi_list_map_info *vsi_list_info =
2916 fm_list->vsi_list_info;
2918 /* Remove the VSI list since it is no longer used */
2919 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2921 ice_debug(hw, ICE_DBG_SW,
2922 "Failed to remove VSI list %d, error %d\n",
2923 vsi_list_id, status);
2927 LIST_DEL(&vsi_list_info->list_entry);
2928 ice_free(hw, vsi_list_info);
2929 fm_list->vsi_list_info = NULL;
2936 * ice_remove_rule_internal - Remove a filter rule of a given type
2938 * @hw: pointer to the hardware structure
2939 * @recp_list: recipe list for which the rule needs to removed
2940 * @f_entry: rule entry containing filter information
/* NOTE(review): excerpt is elided — the vsi_handle declaration, several
 * remove_rule assignments, goto labels and braces are not visible here.
 */
2942 static enum ice_status
2943 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2944 struct ice_fltr_list_entry *f_entry)
2946 struct ice_fltr_mgmt_list_entry *list_elem;
2947 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2948 enum ice_status status = ICE_SUCCESS;
2949 bool remove_rule = false;
2952 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2953 return ICE_ERR_PARAM;
2954 f_entry->fltr_info.fwd_id.hw_vsi_id =
2955 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2957 rule_lock = &recp_list->filt_rule_lock;
2958 ice_acquire_lock(rule_lock);
2959 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2960 &f_entry->fltr_info);
2962 status = ICE_ERR_DOES_NOT_EXIST;
2966 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2968 } else if (!list_elem->vsi_list_info) {
2969 status = ICE_ERR_DOES_NOT_EXIST;
2971 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2972 /* a ref_cnt > 1 indicates that the vsi_list is being
2973 * shared by multiple rules. Decrement the ref_cnt and
2974 * remove this rule, but do not modify the list, as it
2975 * is in-use by other rules.
2977 list_elem->vsi_list_info->ref_cnt--;
2980 /* a ref_cnt of 1 indicates the vsi_list is only used
2981 * by one rule. However, the original removal request is only
2982 * for a single VSI. Update the vsi_list first, and only
2983 * remove the rule if there are no further VSIs in this list.
2985 vsi_handle = f_entry->fltr_info.vsi_handle;
2986 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2989 /* if VSI count goes to zero after updating the VSI list */
2990 if (list_elem->vsi_count == 0)
2995 /* Remove the lookup rule */
2996 struct ice_aqc_sw_rules_elem *s_rule;
2998 s_rule = (struct ice_aqc_sw_rules_elem *)
2999 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3001 status = ICE_ERR_NO_MEMORY;
3005 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3006 ice_aqc_opc_remove_sw_rules);
3008 status = ice_aq_sw_rules(hw, s_rule,
3009 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3010 ice_aqc_opc_remove_sw_rules, NULL);
3012 /* Remove a book keeping from the list */
3013 ice_free(hw, s_rule);
3018 LIST_DEL(&list_elem->list_entry);
3019 ice_free(hw, list_elem);
3022 ice_release_lock(rule_lock);
3027 * ice_aq_get_res_alloc - get allocated resources
3028 * @hw: pointer to the HW struct
3029 * @num_entries: pointer to u16 to store the number of resource entries returned
3030 * @buf: pointer to user-supplied buffer
3031 * @buf_size: size of buff
3032 * @cd: pointer to command details structure or NULL
3034 * The user-supplied buffer must be large enough to store the resource
3035 * information for all resource types. Each resource type is an
3036 * ice_aqc_get_res_resp_data_elem structure.
/* NOTE(review): excerpt is elided — the return type line, the buf NULL
 * check and the final return are not visible here.
 */
3039 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3040 u16 buf_size, struct ice_sq_cd *cd)
3042 struct ice_aqc_get_res_alloc *resp;
3043 enum ice_status status;
3044 struct ice_aq_desc desc;
3047 return ICE_ERR_BAD_PTR;
3049 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3050 return ICE_ERR_INVAL_SIZE;
3052 resp = &desc.params.get_res;
3054 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3055 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only filled on AQ success */
3057 if (!status && num_entries)
3058 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3064 * ice_aq_get_res_descs - get allocated resource descriptors
3065 * @hw: pointer to the hardware structure
3066 * @num_entries: number of resource entries in buffer
3067 * @buf: Indirect buffer to hold data parameters and response
3068 * @buf_size: size of buffer for indirect commands
3069 * @res_type: resource type
3070 * @res_shared: is resource shared
3071 * @desc_id: input - first desc ID to start; output - next desc ID
3072 * @cd: pointer to command details structure or NULL
/* NOTE(review): excerpt is elided — the return type line, the buf NULL
 * check, a status check before updating *desc_id and the final return
 * are not visible here.
 */
3075 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3076 struct ice_aqc_get_allocd_res_desc_resp *buf,
3077 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3078 struct ice_sq_cd *cd)
3080 struct ice_aqc_get_allocd_res_desc *cmd;
3081 struct ice_aq_desc desc;
3082 enum ice_status status;
3084 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3086 cmd = &desc.params.get_res_desc;
3089 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
3091 if (buf_size != (num_entries * sizeof(*buf)))
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3096 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3097 ICE_AQC_RES_TYPE_M) | (res_shared ?
3098 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3099 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3101 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the continuation cursor for the next query */
3103 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3109 * ice_add_mac_rule - Add a MAC address based filter rule
3110 * @hw: pointer to the hardware structure
3111 * @m_list: list of MAC addresses and forwarding information
3112 * @sw: pointer to switch info struct for which function add rule
3113 * @lport: logic port number on which function add rule
3115 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3116 * multiple unicast addresses, the function assumes that all the
3117 * addresses are unique in a given add_mac call. It doesn't
3118 * check for duplicates in this case, removing duplicates from a given
3119 * list should be taken care of in the caller of this function.
/* NOTE(review): excerpt is elided — several declarations (s_rule_size,
 * vsi_handle, hw_vsi_id), num_unicast++ accounting, r_iter resets and
 * braces are not visible here; verify against the full source.
 */
3121 static enum ice_status
3122 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3123 struct ice_switch_info *sw, u8 lport)
3125 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3126 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3127 struct ice_fltr_list_entry *m_list_itr;
3128 struct LIST_HEAD_TYPE *rule_head;
3129 u16 elem_sent, total_elem_left;
3130 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3131 enum ice_status status = ICE_SUCCESS;
3132 u16 num_unicast = 0;
3136 rule_lock = &recp_list->filt_rule_lock;
3137 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry and handle multicast (and shared
 * unicast) addresses one-by-one; count unicast for the bulk path.
 */
3139 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3141 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3145 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3146 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3147 if (!ice_is_vsi_valid(hw, vsi_handle))
3148 return ICE_ERR_PARAM;
3149 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3150 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3151 /* update the src in case it is VSI num */
3152 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3153 return ICE_ERR_PARAM;
3154 m_list_itr->fltr_info.src = hw_vsi_id;
3155 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3156 IS_ZERO_ETHER_ADDR(add))
3157 return ICE_ERR_PARAM;
3158 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3159 /* Don't overwrite the unicast address */
3160 ice_acquire_lock(rule_lock);
3161 if (ice_find_rule_entry(rule_head,
3162 &m_list_itr->fltr_info)) {
3163 ice_release_lock(rule_lock);
3164 return ICE_ERR_ALREADY_EXISTS;
3166 ice_release_lock(rule_lock);
3168 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3169 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3170 m_list_itr->status =
3171 ice_add_rule_internal(hw, recp_list, lport,
3173 if (m_list_itr->status)
3174 return m_list_itr->status;
3178 ice_acquire_lock(rule_lock);
3179 /* Exit if no suitable entries were found for adding bulk switch rule */
3181 status = ICE_SUCCESS;
3182 goto ice_add_mac_exit;
3186 /* Allocate switch rule buffer for the bulk update for unicast */
3187 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3188 s_rule = (struct ice_aqc_sw_rules_elem *)
3189 ice_calloc(hw, num_unicast, s_rule_size);
3191 status = ICE_ERR_NO_MEMORY;
3192 goto ice_add_mac_exit;
/* Second pass: serialize each unicast rule into the bulk buffer */
3196 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3198 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3199 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3201 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3202 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3203 ice_aqc_opc_add_sw_rules);
3204 r_iter = (struct ice_aqc_sw_rules_elem *)
3205 ((u8 *)r_iter + s_rule_size);
3209 /* Call AQ bulk switch rule update for all unicast addresses */
3211 /* Call AQ switch rule in AQ_MAX chunk */
3212 for (total_elem_left = num_unicast; total_elem_left > 0;
3213 total_elem_left -= elem_sent) {
3214 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at the maximum buffer length */
3216 elem_sent = min(total_elem_left,
3217 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3218 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3219 elem_sent, ice_aqc_opc_add_sw_rules,
3222 goto ice_add_mac_exit;
3223 r_iter = (struct ice_aqc_sw_rules_elem *)
3224 ((u8 *)r_iter + (elem_sent * s_rule_size));
3227 /* Fill up rule ID based on the value returned from FW */
3229 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3231 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3232 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3233 struct ice_fltr_mgmt_list_entry *fm_entry;
3235 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3236 f_info->fltr_rule_id =
3237 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3238 f_info->fltr_act = ICE_FWD_TO_VSI;
3239 /* Create an entry to track this MAC address */
3240 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3241 ice_malloc(hw, sizeof(*fm_entry));
3243 status = ICE_ERR_NO_MEMORY;
3244 goto ice_add_mac_exit;
3246 fm_entry->fltr_info = *f_info;
3247 fm_entry->vsi_count = 1;
3248 /* The book keeping entries will get removed when
3249 * base driver calls remove filter AQ command
3252 LIST_ADD(&fm_entry->list_entry, rule_head);
3253 r_iter = (struct ice_aqc_sw_rules_elem *)
3254 ((u8 *)r_iter + s_rule_size);
3259 ice_release_lock(rule_lock);
3261 ice_free(hw, s_rule);
3266 * ice_add_mac - Add a MAC address based filter rule
3267 * @hw: pointer to the hardware structure
3268 * @m_list: list of MAC addresses and forwarding information
3270 * Function add mac rule for logical port from hw struct
/* Thin public wrapper over ice_add_mac_rule() using hw's own switch
 * info and logical port. NOTE(review): the parameter-validation
 * condition guarding the ICE_ERR_PARAM return is elided here.
 */
3273 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3276 return ICE_ERR_PARAM;
3278 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3279 hw->port_info->lport);
3283 * ice_add_vlan_internal - Add one VLAN based filter rule
3284 * @hw: pointer to the hardware structure
3285 * @f_entry: filter entry containing one VLAN information
 *
 * All VLAN pruning rules forward to a VSI list; this function either reuses
 * an existing list, extends a single-VSI list, or builds a new two-VSI list.
 * Return: ICE_SUCCESS, or ICE_ERR_PARAM / ICE_ERR_CFG /
 * ICE_ERR_ALREADY_EXISTS / ICE_ERR_DOES_NOT_EXIST on failure.
 *
 * NOTE(review): this listing elides some lines (braces/conditions); comments
 * describe only the code that is visible here.
3287 static enum ice_status
3288 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3290 struct ice_switch_info *sw = hw->switch_info;
3291 struct ice_fltr_mgmt_list_entry *v_list_itr;
3292 struct ice_fltr_info *new_fltr, *cur_fltr;
3293 struct ice_sw_recipe *recp_list;
3294 enum ice_sw_lkup_type lkup_type;
3295 u16 vsi_list_id = 0, vsi_handle;
3296 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3297 enum ice_status status = ICE_SUCCESS;
 /* reject VSI handles the driver does not track */
3299 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3300 return ICE_ERR_PARAM;
 /* translate software VSI handle to the HW VSI number */
3302 f_entry->fltr_info.fwd_id.hw_vsi_id =
3303 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3304 new_fltr = &f_entry->fltr_info;
3306 /* VLAN ID should only be 12 bits */
3307 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3308 return ICE_ERR_PARAM;
 /* VLAN rules must be sourced from a VSI */
3310 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3311 return ICE_ERR_PARAM;
3313 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3314 lkup_type = new_fltr->lkup_type;
3315 vsi_handle = new_fltr->vsi_handle;
3316 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3317 rule_lock = &recp_list->filt_rule_lock;
3318 ice_acquire_lock(rule_lock);
 /* look up an existing rule for this VLAN under the rule lock */
3319 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3321 struct ice_vsi_list_map_info *map_info = NULL;
3323 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3324 /* All VLAN pruning rules use a VSI list. Check if
3325 * there is already a VSI list containing VSI that we
3326 * want to add. If found, use the same vsi_list_id for
3327 * this new VLAN rule or else create a new list.
3329 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3333 status = ice_create_vsi_list_rule(hw,
3341 /* Convert the action to forwarding to a VSI list. */
3342 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3343 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3346 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
 /* re-find the entry just created so we can attach list info */
3348 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3351 status = ICE_ERR_DOES_NOT_EXIST;
3354 /* reuse VSI list for new rule and increment ref_cnt */
3356 v_list_itr->vsi_list_info = map_info;
3357 map_info->ref_cnt++;
3359 v_list_itr->vsi_list_info =
3360 ice_create_vsi_list_map(hw, &vsi_handle,
3364 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3365 /* Update existing VSI list to add new VSI ID only if it used
3368 cur_fltr = &v_list_itr->fltr_info;
3369 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3372 /* If VLAN rule exists and VSI list being used by this rule is
3373 * referenced by more than 1 VLAN rule. Then create a new VSI
3374 * list appending previous VSI with new VSI and update existing
3375 * VLAN rule to point to new VSI list ID
3377 struct ice_fltr_info tmp_fltr;
3378 u16 vsi_handle_arr[2];
3381 /* Current implementation only supports reusing VSI list with
3382 * one VSI count. We should never hit below condition
3384 if (v_list_itr->vsi_count > 1 &&
3385 v_list_itr->vsi_list_info->ref_cnt > 1) {
3386 ice_debug(hw, ICE_DBG_SW,
3387 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3388 status = ICE_ERR_CFG;
 /* the shared list holds exactly one VSI; fetch its handle */
3393 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3396 /* A rule already exists with the new VSI being added */
3397 if (cur_handle == vsi_handle) {
3398 status = ICE_ERR_ALREADY_EXISTS;
 /* build a new two-entry VSI list: old VSI + new VSI */
3402 vsi_handle_arr[0] = cur_handle;
3403 vsi_handle_arr[1] = vsi_handle;
3404 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3405 &vsi_list_id, lkup_type);
3409 tmp_fltr = v_list_itr->fltr_info;
3410 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3411 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3412 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3413 /* Update the previous switch rule to a new VSI list which
3414 * includes current VSI that is requested
3416 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3420 /* before overriding VSI list map info. decrement ref_cnt of
3423 v_list_itr->vsi_list_info->ref_cnt--;
3425 /* now update to newly created list */
3426 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3427 v_list_itr->vsi_list_info =
3428 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3430 v_list_itr->vsi_count++;
3434 ice_release_lock(rule_lock);
3439 * ice_add_vlan - Add VLAN based filter rule
3440 * @hw: pointer to the hardware structure
3441 * @v_list: list of VLAN entries and forwarding information
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM on invalid input/lookup type, or the
 * first failing per-entry status from ice_add_vlan_internal(). Stops on
 * first failure; earlier entries stay installed.
3444 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3446 struct ice_fltr_list_entry *v_list_itr;
3449 return ICE_ERR_PARAM;
3451 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
 /* only VLAN lookup entries are valid on this list */
3453 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3454 return ICE_ERR_PARAM;
 /* VLAN filters are installed as Tx rules */
3455 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3456 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3457 if (v_list_itr->status)
3458 return v_list_itr->status;
3464 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3465 * @hw: pointer to the hardware structure
3466 * @mv_list: list of MAC and VLAN filters
3468 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3469 * pruning bits enabled, then it is the responsibility of the caller to make
3470 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3471 * VLAN won't be received on that VSI otherwise.
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM on bad input, or the first failing
 * per-entry status from ice_add_rule_internal().
3474 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3476 struct ice_fltr_list_entry *mv_list_itr;
3477 struct ice_sw_recipe *recp_list;
3479 if (!mv_list || !hw)
3480 return ICE_ERR_PARAM;
 /* all entries go through the MAC+VLAN recipe */
3482 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3483 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3485 enum ice_sw_lkup_type l_type =
3486 mv_list_itr->fltr_info.lkup_type;
3488 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3489 return ICE_ERR_PARAM;
 /* MAC-VLAN filters are installed as Tx rules */
3490 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3491 mv_list_itr->status =
3492 ice_add_rule_internal(hw, recp_list,
3493 hw->port_info->lport,
3495 if (mv_list_itr->status)
3496 return mv_list_itr->status;
3502 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3503 * @hw: pointer to the hardware structure
3504 * @em_list: list of ether type MAC filter, MAC is optional
3506 * This function requires the caller to populate the entries in
3507 * the filter list with the necessary fields (including flags to
3508 * indicate Tx or Rx rules).
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM for a NULL list or a lookup type
 * other than ETHERTYPE/ETHERTYPE_MAC, or the first failing entry status.
3511 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3513 struct ice_fltr_list_entry *em_list_itr;
3516 if (!em_list || !hw)
3517 return ICE_ERR_PARAM;
3519 lport = hw->port_info->lport;
3520 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3522 struct ice_sw_recipe *recp_list;
3523 enum ice_sw_lkup_type l_type;
 /* recipe is selected per-entry by its lookup type */
3525 l_type = em_list_itr->fltr_info.lkup_type;
3526 recp_list = &hw->switch_info->recp_list[l_type];
3528 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3529 l_type != ICE_SW_LKUP_ETHERTYPE)
3530 return ICE_ERR_PARAM;
3532 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3535 if (em_list_itr->status)
3536 return em_list_itr->status;
3542 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3543 * @hw: pointer to the hardware structure
3544 * @em_list: list of ethertype or ethertype MAC entries
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM for a NULL list or wrong lookup type,
 * or the first failing per-entry status from ice_remove_rule_internal().
3547 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3549 struct ice_fltr_list_entry *em_list_itr, *tmp;
3550 struct ice_sw_recipe *recp_list;
3552 if (!em_list || !hw)
3553 return ICE_ERR_PARAM;
 /* SAFE iteration: removal may unlink entries while walking */
3555 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3557 enum ice_sw_lkup_type l_type =
3558 em_list_itr->fltr_info.lkup_type;
3560 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3561 l_type != ICE_SW_LKUP_ETHERTYPE)
3562 return ICE_ERR_PARAM;
3564 recp_list = &hw->switch_info->recp_list[l_type];
3565 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3567 if (em_list_itr->status)
3568 return em_list_itr->status;
3574 * ice_rem_sw_rule_info
3575 * @hw: pointer to the hardware structure
3576 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every software bookkeeping entry on @rule_head. This only releases
 * driver-side state; it does not issue AQ commands to remove HW rules.
3579 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3581 if (!LIST_EMPTY(rule_head)) {
3582 struct ice_fltr_mgmt_list_entry *entry;
3583 struct ice_fltr_mgmt_list_entry *tmp;
 /* SAFE iteration: each entry is unlinked and freed in turn */
3585 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3586 ice_fltr_mgmt_list_entry, list_entry) {
3587 LIST_DEL(&entry->list_entry);
3588 ice_free(hw, entry);
3594 * ice_rem_adv_rule_info
3595 * @hw: pointer to the hardware structure
3596 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees all advanced-rule bookkeeping entries, including each entry's
 * separately allocated lookup array (lkups). Driver-side state only.
3599 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3601 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3602 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3604 if (LIST_EMPTY(rule_head))
3607 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3608 ice_adv_fltr_mgmt_list_entry, list_entry) {
3609 LIST_DEL(&lst_itr->list_entry);
 /* free the lookup array before the entry that owns it */
3610 ice_free(hw, lst_itr->lkups);
3611 ice_free(hw, lst_itr);
3616 * ice_rem_all_sw_rules_info
3617 * @hw: pointer to the hardware structure
 *
 * Walks every recipe and frees its filter-rule bookkeeping, dispatching to
 * the basic or advanced cleanup helper based on the recipe's adv_rule flag.
3619 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3621 struct ice_switch_info *sw = hw->switch_info;
3624 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3625 struct LIST_HEAD_TYPE *rule_head;
3627 rule_head = &sw->recp_list[i].filt_rules;
 /* basic recipes use plain entries; advanced ones carry lkups */
3628 if (!sw->recp_list[i].adv_rule)
3629 ice_rem_sw_rule_info(hw, rule_head);
3631 ice_rem_adv_rule_info(hw, rule_head);
3636 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3637 * @pi: pointer to the port_info structure
3638 * @vsi_handle: VSI handle to set as default
3639 * @set: true to add the above mentioned switch rule, false to remove it
3640 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3642 * add filter rule to set/unset given VSI as default VSI for the switch
3643 * (represented by swid)
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM for an invalid handle,
 * ICE_ERR_NO_MEMORY on allocation failure, or the AQ status.
3646 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3649 struct ice_aqc_sw_rules_elem *s_rule;
3650 struct ice_fltr_info f_info;
3651 struct ice_hw *hw = pi->hw;
3652 enum ice_adminq_opc opcode;
3653 enum ice_status status;
3657 if (!ice_is_vsi_valid(hw, vsi_handle))
3658 return ICE_ERR_PARAM;
3659 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 /* an "add" rule carries the dummy ethernet header; "remove" does not */
3661 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3662 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3663 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3665 return ICE_ERR_NO_MEMORY;
3667 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3669 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3670 f_info.flag = direction;
3671 f_info.fltr_act = ICE_FWD_TO_VSI;
3672 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
 /* Rx rules are sourced from the logical port; Tx from the VSI */
3674 if (f_info.flag & ICE_FLTR_RX) {
3675 f_info.src = pi->lport;
3676 f_info.src_id = ICE_SRC_ID_LPORT;
3678 f_info.fltr_rule_id =
3679 pi->dflt_rx_vsi_rule_id;
3680 } else if (f_info.flag & ICE_FLTR_TX) {
3681 f_info.src_id = ICE_SRC_ID_VSI;
3682 f_info.src = hw_vsi_id;
3684 f_info.fltr_rule_id =
3685 pi->dflt_tx_vsi_rule_id;
3689 opcode = ice_aqc_opc_add_sw_rules;
3691 opcode = ice_aqc_opc_remove_sw_rules;
3693 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3695 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3696 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
 /* on success, cache the HW rule index for later removal */
3699 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3701 if (f_info.flag & ICE_FLTR_TX) {
3702 pi->dflt_tx_vsi_num = hw_vsi_id;
3703 pi->dflt_tx_vsi_rule_id = index;
3704 } else if (f_info.flag & ICE_FLTR_RX) {
3705 pi->dflt_rx_vsi_num = hw_vsi_id;
3706 pi->dflt_rx_vsi_rule_id = index;
 /* clearing: invalidate the cached default-VSI bookkeeping */
3709 if (f_info.flag & ICE_FLTR_TX) {
3710 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3711 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3712 } else if (f_info.flag & ICE_FLTR_RX) {
3713 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3714 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3719 ice_free(hw, s_rule);
3724 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3725 * @list_head: head of rule list
3726 * @f_info: rule information
3728 * Helper function to search for a unicast rule entry - this is to be used
3729 * to remove unicast MAC filter that is not shared with other VSIs on the
3732 * Returns pointer to entry storing the rule if found
 * (match requires identical lookup data, HW VSI ID, and flag).
3734 static struct ice_fltr_mgmt_list_entry *
3735 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3736 struct ice_fltr_info *f_info)
3738 struct ice_fltr_mgmt_list_entry *list_itr;
3740 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
 /* compare the raw lookup data (MAC etc.), owning VSI, and flag */
3742 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3743 sizeof(f_info->l_data)) &&
3744 f_info->fwd_id.hw_vsi_id ==
3745 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3746 f_info->flag == list_itr->fltr_info.flag)
3753 * ice_remove_mac_rule - remove a MAC based filter rule
3754 * @hw: pointer to the hardware structure
3755 * @m_list: list of MAC addresses and forwarding information
3756 * @recp_list: list from which function remove MAC address
3758 * This function removes either a MAC filter rule or a specific VSI from a
3759 * VSI list for a multicast MAC address.
3761 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3762 * ice_add_mac. Caller should be aware that this call will only work if all
3763 * the entries passed into m_list were added previously. It will not attempt to
3764 * do a partial remove of entries that were found.
3766 static enum ice_status
3767 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3768 struct ice_sw_recipe *recp_list)
3770 struct ice_fltr_list_entry *list_itr, *tmp;
3771 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3774 return ICE_ERR_PARAM;
3776 rule_lock = &recp_list->filt_rule_lock;
3777 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3779 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3780 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3783 if (l_type != ICE_SW_LKUP_MAC)
3784 return ICE_ERR_PARAM;
3786 vsi_handle = list_itr->fltr_info.vsi_handle;
3787 if (!ice_is_vsi_valid(hw, vsi_handle))
3788 return ICE_ERR_PARAM;
3790 list_itr->fltr_info.fwd_id.hw_vsi_id =
3791 ice_get_hw_vsi_num(hw, vsi_handle);
 /* non-shared unicast: only remove if this VSI actually owns it */
3792 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3793 /* Don't remove the unicast address that belongs to
3794 * another VSI on the switch, since it is not being
 * ownership check is done under the rule lock
3797 ice_acquire_lock(rule_lock);
3798 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3799 &list_itr->fltr_info)) {
3800 ice_release_lock(rule_lock);
3801 return ICE_ERR_DOES_NOT_EXIST;
3803 ice_release_lock(rule_lock);
3805 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3807 if (list_itr->status)
3808 return list_itr->status;
3814 * ice_remove_mac - remove a MAC address based filter rule
3815 * @hw: pointer to the hardware structure
3816 * @m_list: list of MAC addresses and forwarding information
 *
 * Thin wrapper: selects the MAC recipe and delegates to
 * ice_remove_mac_rule(). Return value is passed through unchanged.
3820 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3822 struct ice_sw_recipe *recp_list;
3824 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3825 return ice_remove_mac_rule(hw, m_list, recp_list);
3829 * ice_remove_vlan - Remove VLAN based filter rule
3830 * @hw: pointer to the hardware structure
3831 * @v_list: list of VLAN entries and forwarding information
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM on bad input, or the first failing
 * per-entry status from ice_remove_rule_internal().
3834 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3836 struct ice_fltr_list_entry *v_list_itr, *tmp;
3837 struct ice_sw_recipe *recp_list;
3840 return ICE_ERR_PARAM;
3842 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3843 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3845 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
 /* only VLAN lookup entries are valid on this list */
3847 if (l_type != ICE_SW_LKUP_VLAN)
3848 return ICE_ERR_PARAM;
3849 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3851 if (v_list_itr->status)
3852 return v_list_itr->status;
3858 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3859 * @hw: pointer to the hardware structure
3860 * @v_list: list of MAC VLAN entries and forwarding information
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM on bad input, or the first failing
 * per-entry status from ice_remove_rule_internal().
3863 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3865 struct ice_fltr_list_entry *v_list_itr, *tmp;
3866 struct ice_sw_recipe *recp_list;
3869 return ICE_ERR_PARAM;
3871 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3872 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3874 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
 /* only MAC+VLAN lookup entries are valid on this list */
3876 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3877 return ICE_ERR_PARAM;
3878 v_list_itr->status =
3879 ice_remove_rule_internal(hw, recp_list,
3881 if (v_list_itr->status)
3882 return v_list_itr->status;
3888 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3889 * @fm_entry: filter entry to inspect
3890 * @vsi_handle: VSI handle to compare with filter info
 *
 * True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
3893 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3895 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3896 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3897 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3898 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3903 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3904 * @hw: pointer to the hardware structure
3905 * @vsi_handle: VSI handle to remove filters from
3906 * @vsi_list_head: pointer to the list to add entry to
3907 * @fi: pointer to fltr_info of filter entry to copy & add
3909 * Helper function, used when creating a list of filters to remove from
3910 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3911 * original filter entry, with the exception of fltr_info.fltr_act and
3912 * fltr_info.fwd_id fields. These are set such that later logic can
3913 * extract which VSI to remove the fltr from, and pass on that information.
 *
 * Return: ICE_SUCCESS or ICE_ERR_NO_MEMORY. The allocated copy is owned
 * by the caller (freed once filters for this VSI are removed).
3915 static enum ice_status
3916 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3917 struct LIST_HEAD_TYPE *vsi_list_head,
3918 struct ice_fltr_info *fi)
3920 struct ice_fltr_list_entry *tmp;
3922 /* this memory is freed up in the caller function
3923 * once filters for this VSI are removed
3925 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3927 return ICE_ERR_NO_MEMORY;
 /* struct copy of the original filter info */
3929 tmp->fltr_info = *fi;
3931 /* Overwrite these fields to indicate which VSI to remove filter from,
3932 * so find and remove logic can extract the information from the
3933 * list entries. Note that original entries will still have proper
3936 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3937 tmp->fltr_info.vsi_handle = vsi_handle;
3938 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3940 LIST_ADD(&tmp->list_entry, vsi_list_head);
3946 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3947 * @hw: pointer to the hardware structure
3948 * @vsi_handle: VSI handle to remove filters from
3949 * @lkup_list_head: pointer to the list that has certain lookup type filters
3950 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3952 * Locates all filters in lkup_list_head that are used by the given VSI,
3953 * and adds COPIES of those entries to vsi_list_head (intended to be used
3954 * to remove the listed filters).
3955 * Note that this means all entries in vsi_list_head must be explicitly
3956 * deallocated by the caller when done with list.
 *
 * Return: ICE_SUCCESS, ICE_ERR_PARAM for an invalid VSI handle, or the
 * allocation failure status from ice_add_entry_to_vsi_fltr_list().
3958 static enum ice_status
3959 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3960 struct LIST_HEAD_TYPE *lkup_list_head,
3961 struct LIST_HEAD_TYPE *vsi_list_head)
3963 struct ice_fltr_mgmt_list_entry *fm_entry;
3964 enum ice_status status = ICE_SUCCESS;
3966 /* check to make sure VSI ID is valid and within boundary */
3967 if (!ice_is_vsi_valid(hw, vsi_handle))
3968 return ICE_ERR_PARAM;
3970 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3971 ice_fltr_mgmt_list_entry, list_entry) {
3972 struct ice_fltr_info *fi;
3974 fi = &fm_entry->fltr_info;
 /* skip filters not referencing this VSI */
3975 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3978 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3987 * ice_determine_promisc_mask
3988 * @fi: filter info to parse
3990 * Helper function to determine which ICE_PROMISC_ mask corresponds
3991 * to given filter into.
 *
 * Classifies by direction (Tx when flag == ICE_FLTR_TX, else Rx) and by
 * destination MAC class (broadcast / multicast / unicast / VLAN-only).
3993 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3995 u16 vid = fi->l_data.mac_vlan.vlan_id;
3996 u8 *macaddr = fi->l_data.mac.mac_addr;
3997 bool is_tx_fltr = false;
3998 u8 promisc_mask = 0;
4000 if (fi->flag == ICE_FLTR_TX)
 /* pick the direction-specific bit for the MAC address class */
4003 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4004 promisc_mask |= is_tx_fltr ?
4005 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4006 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4007 promisc_mask |= is_tx_fltr ?
4008 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4009 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4010 promisc_mask |= is_tx_fltr ?
4011 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4013 promisc_mask |= is_tx_fltr ?
4014 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4016 return promisc_mask;
4020 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4021 * @hw: pointer to the hardware structure
4022 * @vsi_handle: VSI handle to retrieve info from
4023 * @promisc_mask: pointer to mask to be filled in
4024 * @vid: VLAN ID of promisc VLAN VSI
 *
 * ORs the ICE_PROMISC_* bits of every PROMISC-recipe rule that applies to
 * this VSI into *promisc_mask. Return: ICE_SUCCESS or ICE_ERR_PARAM.
4027 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4030 struct ice_switch_info *sw = hw->switch_info;
4031 struct ice_fltr_mgmt_list_entry *itr;
4032 struct LIST_HEAD_TYPE *rule_head;
4033 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4035 if (!ice_is_vsi_valid(hw, vsi_handle))
4036 return ICE_ERR_PARAM;
4040 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4041 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
 /* scan the rule list under its lock */
4043 ice_acquire_lock(rule_lock);
4044 LIST_FOR_EACH_ENTRY(itr, rule_head,
4045 ice_fltr_mgmt_list_entry, list_entry) {
4046 /* Continue if this filter doesn't apply to this VSI or the
4047 * VSI ID is not in the VSI map for this filter
4049 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4052 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4054 ice_release_lock(rule_lock);
4060 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4061 * @hw: pointer to the hardware structure
4062 * @vsi_handle: VSI handle to retrieve info from
4063 * @promisc_mask: pointer to mask to be filled in
4064 * @vid: VLAN ID of promisc VLAN VSI
 *
 * Same as ice_get_vsi_promisc() but scans the PROMISC_VLAN recipe.
 * Return: ICE_SUCCESS or ICE_ERR_PARAM.
4067 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4070 struct ice_switch_info *sw = hw->switch_info;
4071 struct ice_fltr_mgmt_list_entry *itr;
4072 struct LIST_HEAD_TYPE *rule_head;
4073 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4075 if (!ice_is_vsi_valid(hw, vsi_handle))
4076 return ICE_ERR_PARAM;
4080 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4081 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4083 ice_acquire_lock(rule_lock);
4084 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4086 /* Continue if this filter doesn't apply to this VSI or the
4087 * VSI ID is not in the VSI map for this filter
4089 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4092 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4094 ice_release_lock(rule_lock);
4100 * ice_remove_promisc - Remove promisc based filter rules
4101 * @hw: pointer to the hardware structure
4102 * @recp_id: recipe ID for which the rule needs to removed
4103 * @v_list: list of promisc entries
 *
 * Return: ICE_SUCCESS or the first failing per-entry status from
 * ice_remove_rule_internal(); stops on first failure.
4105 static enum ice_status
4106 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4107 struct LIST_HEAD_TYPE *v_list)
4109 struct ice_fltr_list_entry *v_list_itr, *tmp;
4110 struct ice_sw_recipe *recp_list;
4112 recp_list = &hw->switch_info->recp_list[recp_id];
4113 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4115 v_list_itr->status =
4116 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4117 if (v_list_itr->status)
4118 return v_list_itr->status;
4124 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4125 * @hw: pointer to the hardware structure
4126 * @vsi_handle: VSI handle to clear mode
4127 * @promisc_mask: mask of promiscuous config bits to clear
4128 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Builds a temporary removal list of matching promisc rules, removes them,
 * then frees the list. Return: ICE_SUCCESS, ICE_ERR_PARAM, or the first
 * failure from list-building / rule removal.
4131 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4134 struct ice_switch_info *sw = hw->switch_info;
4135 struct ice_fltr_list_entry *fm_entry, *tmp;
4136 struct LIST_HEAD_TYPE remove_list_head;
4137 struct ice_fltr_mgmt_list_entry *itr;
4138 struct LIST_HEAD_TYPE *rule_head;
4139 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4140 enum ice_status status = ICE_SUCCESS;
4143 if (!ice_is_vsi_valid(hw, vsi_handle))
4144 return ICE_ERR_PARAM;
 /* VLAN-directed bits select the PROMISC_VLAN recipe */
4146 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4147 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4149 recipe_id = ICE_SW_LKUP_PROMISC;
4151 rule_head = &sw->recp_list[recipe_id].filt_rules;
4152 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4154 INIT_LIST_HEAD(&remove_list_head);
4156 ice_acquire_lock(rule_lock);
4157 LIST_FOR_EACH_ENTRY(itr, rule_head,
4158 ice_fltr_mgmt_list_entry, list_entry) {
4159 struct ice_fltr_info *fltr_info;
4160 u8 fltr_promisc_mask = 0;
4162 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4164 fltr_info = &itr->fltr_info;
 /* for the VLAN recipe, only rules matching @vid are cleared */
4166 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4167 vid != fltr_info->l_data.mac_vlan.vlan_id)
4170 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4172 /* Skip if filter is not completely specified by given mask */
4173 if (fltr_promisc_mask & ~promisc_mask)
4176 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4180 ice_release_lock(rule_lock);
4181 goto free_fltr_list;
4184 ice_release_lock(rule_lock);
4186 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
 /* free the temporary copies regardless of removal outcome */
4189 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4190 ice_fltr_list_entry, list_entry) {
4191 LIST_DEL(&fm_entry->list_entry);
4192 ice_free(hw, fm_entry);
4199 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4200 * @hw: pointer to the hardware structure
4201 * @vsi_handle: VSI handle to configure
4202 * @promisc_mask: mask of promiscuous config bits
4203 * @vid: VLAN ID to set VLAN promiscuous
 *
 * Installs one switch rule per direction/packet-type bit set in
 * @promisc_mask, peeling one bit off the mask each loop iteration.
 * Return: ICE_SUCCESS, ICE_ERR_PARAM, or the first rule-add failure.
4206 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4208 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4209 struct ice_fltr_list_entry f_list_entry;
4210 struct ice_fltr_info new_fltr;
4211 enum ice_status status = ICE_SUCCESS;
4217 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4219 if (!ice_is_vsi_valid(hw, vsi_handle))
4220 return ICE_ERR_PARAM;
4221 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4223 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
 /* VLAN-directed promisc uses the PROMISC_VLAN recipe with @vid */
4225 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4226 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4227 new_fltr.l_data.mac_vlan.vlan_id = vid;
4228 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4230 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4231 recipe_id = ICE_SW_LKUP_PROMISC;
4234 /* Separate filters must be set for each direction/packet type
4235 * combination, so we will loop over the mask value, store the
4236 * individual type, and clear it out in the input mask as it
4239 while (promisc_mask) {
4240 struct ice_sw_recipe *recp_list;
 /* consume exactly one UCAST/MCAST/BCAST Rx or Tx bit */
4246 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4247 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4248 pkt_type = UCAST_FLTR;
4249 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4250 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4251 pkt_type = UCAST_FLTR;
4253 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4254 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4255 pkt_type = MCAST_FLTR;
4256 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4257 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4258 pkt_type = MCAST_FLTR;
4260 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4261 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4262 pkt_type = BCAST_FLTR;
4263 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4264 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4265 pkt_type = BCAST_FLTR;
4269 /* Check for VLAN promiscuous flag */
4270 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4271 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4272 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4273 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4277 /* Set filter DA based on packet type */
4278 mac_addr = new_fltr.l_data.mac.mac_addr;
4279 if (pkt_type == BCAST_FLTR) {
4280 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4281 } else if (pkt_type == MCAST_FLTR ||
4282 pkt_type == UCAST_FLTR) {
4283 /* Use the dummy ether header DA */
4284 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4285 ICE_NONDMA_TO_NONDMA);
4286 if (pkt_type == MCAST_FLTR)
4287 mac_addr[0] |= 0x1; /* Set multicast bit */
4290 /* Need to reset this to zero for all iterations */
 /* Tx rules source from the VSI; Rx rules from the logical port */
4293 new_fltr.flag |= ICE_FLTR_TX;
4294 new_fltr.src = hw_vsi_id;
4296 new_fltr.flag |= ICE_FLTR_RX;
4297 new_fltr.src = hw->port_info->lport;
4300 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4301 new_fltr.vsi_handle = vsi_handle;
4302 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4303 f_list_entry.fltr_info = new_fltr;
4304 recp_list = &hw->switch_info->recp_list[recipe_id];
4306 status = ice_add_rule_internal(hw, recp_list,
4307 hw->port_info->lport,
4309 if (status != ICE_SUCCESS)
4310 goto set_promisc_exit;
4318 * ice_set_vlan_vsi_promisc
4319 * @hw: pointer to the hardware structure
4320 * @vsi_handle: VSI handle to configure
4321 * @promisc_mask: mask of promiscuous config bits
4322 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4324 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Collects copies of this VSI's VLAN filters, then sets or clears the
 * requested promisc mode per VLAN ID, and frees the temporary copies.
4327 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4328 bool rm_vlan_promisc)
4330 struct ice_switch_info *sw = hw->switch_info;
4331 struct ice_fltr_list_entry *list_itr, *tmp;
4332 struct LIST_HEAD_TYPE vsi_list_head;
4333 struct LIST_HEAD_TYPE *vlan_head;
4334 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4335 enum ice_status status;
4338 INIT_LIST_HEAD(&vsi_list_head);
4339 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4340 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
 /* snapshot this VSI's VLAN filters under the VLAN rule lock */
4341 ice_acquire_lock(vlan_lock)
4342 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4344 ice_release_lock(vlan_lock);
4346 goto free_fltr_list;
4348 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4350 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4351 if (rm_vlan_promisc)
4352 status = ice_clear_vsi_promisc(hw, vsi_handle,
4353 promisc_mask, vlan_id);
4355 status = ice_set_vsi_promisc(hw, vsi_handle,
4356 promisc_mask, vlan_id);
 /* free the temporary filter copies in all cases */
4362 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4363 ice_fltr_list_entry, list_entry) {
4364 LIST_DEL(&list_itr->list_entry);
4365 ice_free(hw, list_itr);
4371 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4372 * @hw: pointer to the hardware structure
4373 * @vsi_handle: VSI handle to remove filters from
4374 * @recp_list: recipe list from which function remove fltr
4375 * @lkup: switch rule filter lookup type
 *
 * Builds a temporary list of this VSI's filters for @lkup, dispatches to
 * the lookup-type-specific remove routine, then frees the copies.
4378 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4379 struct ice_sw_recipe *recp_list,
4380 enum ice_sw_lkup_type lkup)
4382 struct ice_fltr_list_entry *fm_entry;
4383 struct LIST_HEAD_TYPE remove_list_head;
4384 struct LIST_HEAD_TYPE *rule_head;
4385 struct ice_fltr_list_entry *tmp;
4386 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4387 enum ice_status status;
4389 INIT_LIST_HEAD(&remove_list_head);
 /* @recp_list is indexed by lookup type here */
4390 rule_lock = &recp_list[lkup].filt_rule_lock;
4391 rule_head = &recp_list[lkup].filt_rules;
4392 ice_acquire_lock(rule_lock);
4393 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4395 ice_release_lock(rule_lock);
 /* dispatch on lookup type; per-entry statuses are not propagated */
4400 case ICE_SW_LKUP_MAC:
4401 ice_remove_mac_rule(hw, &remove_list_head, recp_list);
4403 case ICE_SW_LKUP_VLAN:
4404 ice_remove_vlan(hw, &remove_list_head);
4406 case ICE_SW_LKUP_PROMISC:
4407 case ICE_SW_LKUP_PROMISC_VLAN:
4408 ice_remove_promisc(hw, lkup, &remove_list_head);
4410 case ICE_SW_LKUP_MAC_VLAN:
4411 ice_remove_mac_vlan(hw, &remove_list_head);
4413 case ICE_SW_LKUP_ETHERTYPE:
4414 case ICE_SW_LKUP_ETHERTYPE_MAC:
4415 ice_remove_eth_mac(hw, &remove_list_head);
4417 case ICE_SW_LKUP_DFLT:
4418 ice_debug(hw, ICE_DBG_SW,
4419 "Remove filters for this lookup type hasn't been implemented yet\n");
4421 case ICE_SW_LKUP_LAST:
4422 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
 /* free the temporary filter copies */
4426 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4427 ice_fltr_list_entry, list_entry) {
4428 LIST_DEL(&fm_entry->list_entry);
4429 ice_free(hw, fm_entry);
4434 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4435 * @hw: pointer to the hardware structure
4436 * @vsi_handle: VSI handle to remove filters from
4437 * @sw: pointer to switch info struct
 *
 * Removes this VSI's filters for every supported lookup type in turn
 * (MAC, MAC+VLAN, promisc, VLAN, default, ethertype, promisc+VLAN).
4440 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4441 struct ice_switch_info *sw)
4443 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4445 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4446 sw->recp_list, ICE_SW_LKUP_MAC);
4447 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4448 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4449 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4450 sw->recp_list, ICE_SW_LKUP_PROMISC);
4451 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4452 sw->recp_list, ICE_SW_LKUP_VLAN);
4453 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4454 sw->recp_list, ICE_SW_LKUP_DFLT);
4455 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4456 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4457 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4458 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4459 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4460 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4464 * ice_remove_vsi_fltr - Remove all filters for a VSI
4465 * @hw: pointer to the hardware structure
4466 * @vsi_handle: VSI handle to remove filters from
 *
 * Convenience wrapper over ice_remove_vsi_fltr_rule() using hw's own
 * switch_info.
4468 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4470 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4474 * ice_alloc_res_cntr - allocating resource counter
4475 * @hw: pointer to the hardware structure
4476 * @type: type of resource
4477 * @alloc_shared: if set it is shared else dedicated
4478 * @num_items: number of entries requested for FD resource type
4479 * @counter_id: counter index returned by AQ call
4482 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4485 struct ice_aqc_alloc_free_res_elem *buf;
4486 enum ice_status status;
4489 /* Allocate resource */
/* Single-element admin queue buffer describing the allocation request. */
4490 buf_len = sizeof(*buf);
4491 buf = (struct ice_aqc_alloc_free_res_elem *)
4492 ice_malloc(hw, buf_len);
4494 return ICE_ERR_NO_MEMORY;
4496 buf->num_elems = CPU_TO_LE16(num_items);
/* res_type packs the resource type into its bitfield and ORs in the
 * caller-supplied shared/dedicated flag. */
4497 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4498 ICE_AQC_RES_TYPE_M) | alloc_shared);
4500 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4501 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in the first
 * element's sw_resp field. */
4505 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4513 * ice_free_res_cntr - free resource counter
4514 * @hw: pointer to the hardware structure
4515 * @type: type of resource
4516 * @alloc_shared: if set it is shared else dedicated
4517 * @num_items: number of entries to be freed for FD resource type
4518 * @counter_id: counter ID resource which needs to be freed
4521 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4524 struct ice_aqc_alloc_free_res_elem *buf;
4525 enum ice_status status;
/* Free resource: mirror of ice_alloc_res_cntr() but with the free
 * opcode and the counter ID to release filled into the request. */
4529 buf_len = sizeof(*buf);
4530 buf = (struct ice_aqc_alloc_free_res_elem *)
4531 ice_malloc(hw, buf_len);
4533 return ICE_ERR_NO_MEMORY;
4535 buf->num_elems = CPU_TO_LE16(num_items);
4536 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4537 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Tell FW which counter resource to release. */
4538 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4540 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4541 ice_aqc_opc_free_res, NULL);
/* Failure is logged but not fatal to the caller's flow. */
4543 ice_debug(hw, ICE_DBG_SW,
4544 "counter resource could not be freed\n");
4551 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4552 * @hw: pointer to the hardware structure
4553 * @counter_id: returns counter index
4555 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Allocate one dedicated (non-shared) VLAN counter resource. */
4557 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4558 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4563 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4564 * @hw: pointer to the hardware structure
4565 * @counter_id: counter index to be freed
4567 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Release one dedicated VLAN counter previously obtained via
 * ice_alloc_vlan_res_counter(). */
4569 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4570 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4575 * ice_alloc_res_lg_act - add large action resource
4576 * @hw: pointer to the hardware structure
4577 * @l_id: large action ID to fill it in
4578 * @num_acts: number of actions to hold with a large action entry
4580 static enum ice_status
4581 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4583 struct ice_aqc_alloc_free_res_elem *sw_buf;
4584 enum ice_status status;
/* Reject zero or oversize action counts up front (ICE_MAX_LG_ACT is
 * the HW limit on actions per wide-table entry). */
4587 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4588 return ICE_ERR_PARAM;
4590 /* Allocate resource for large action */
4591 buf_len = sizeof(*sw_buf);
4592 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4593 ice_malloc(hw, buf_len);
4595 return ICE_ERR_NO_MEMORY;
4597 sw_buf->num_elems = CPU_TO_LE16(1);
4599 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4600 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4601 * If num_acts is greater than 2, then use
4602 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4603 * The num_acts cannot exceed 4. This was ensured at the
4604 * beginning of the function.
4607 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4608 else if (num_acts == 2)
4609 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4611 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4613 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4614 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated wide-table (large action) index. */
4616 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4618 ice_free(hw, sw_buf);
4623 * ice_add_mac_with_sw_marker - add filter with sw marker
4624 * @hw: pointer to the hardware structure
4625 * @f_info: filter info structure containing the MAC filter information
4626 * @sw_marker: sw marker to tag the Rx descriptor with
4629 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4632 struct ice_fltr_mgmt_list_entry *m_entry;
4633 struct ice_fltr_list_entry fl_info;
4634 struct ice_sw_recipe *recp_list;
4635 struct LIST_HEAD_TYPE l_head;
4636 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4637 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI handle
 * may carry a SW-marker large action. */
4641 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4642 return ICE_ERR_PARAM;
4644 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4645 return ICE_ERR_PARAM;
4647 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4648 return ICE_ERR_PARAM;
4650 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4651 return ICE_ERR_PARAM;
4652 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4654 /* Add filter if it doesn't exist so then the adding of large
4655 * action always results in update
4658 INIT_LIST_HEAD(&l_head);
4659 fl_info.fltr_info = *f_info;
4660 LIST_ADD(&fl_info.list_entry, &l_head);
4662 entry_exists = false;
4663 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4664 hw->port_info->lport);
/* ALREADY_EXISTS is expected when the filter was added earlier;
 * remember it so the error path does not remove a pre-existing rule. */
4665 if (ret == ICE_ERR_ALREADY_EXISTS)
4666 entry_exists = true;
4670 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4671 rule_lock = &recp_list->filt_rule_lock;
4672 ice_acquire_lock(rule_lock);
4673 /* Get the book keeping entry for the filter */
4674 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4678 /* If counter action was enabled for this rule then don't enable
4679 * sw marker large action
4681 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4682 ret = ICE_ERR_PARAM;
4686 /* if same marker was added before */
4687 if (m_entry->sw_marker_id == sw_marker) {
4688 ret = ICE_ERR_ALREADY_EXISTS;
4692 /* Allocate a hardware table entry to hold large act. Three actions
4693 * for marker based large action
4695 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4699 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4702 /* Update the switch rule to add the marker action */
4703 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4705 ice_release_lock(rule_lock);
4710 ice_release_lock(rule_lock);
4711 /* only remove entry if it did not exist previously */
4713 ret = ice_remove_mac(hw, &l_head);
4719 * ice_add_mac_with_counter - add filter with counter enabled
4720 * @hw: pointer to the hardware structure
4721 * @f_info: pointer to filter info structure containing the MAC filter
4725 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4727 struct ice_fltr_mgmt_list_entry *m_entry;
4728 struct ice_fltr_list_entry fl_info;
4729 struct ice_sw_recipe *recp_list;
4730 struct LIST_HEAD_TYPE l_head;
4731 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4732 enum ice_status ret;
/* Same pre-checks as ice_add_mac_with_sw_marker(): forward-to-VSI MAC
 * filter on a valid VSI. */
4737 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4738 return ICE_ERR_PARAM;
4740 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4741 return ICE_ERR_PARAM;
4743 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4744 return ICE_ERR_PARAM;
4745 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4746 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4748 entry_exist = false;
4750 rule_lock = &recp_list->filt_rule_lock;
4752 /* Add filter if it doesn't exist so then the adding of large
4753 * action always results in update
4755 INIT_LIST_HEAD(&l_head);
4757 fl_info.fltr_info = *f_info;
4758 LIST_ADD(&fl_info.list_entry, &l_head);
4760 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4761 hw->port_info->lport);
/* A pre-existing rule is tracked so the error path does not remove it. */
4762 if (ret == ICE_ERR_ALREADY_EXISTS)
4767 ice_acquire_lock(rule_lock);
4768 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4770 ret = ICE_ERR_BAD_PTR;
4774 /* Don't enable counter for a filter for which sw marker was enabled */
4775 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4776 ret = ICE_ERR_PARAM;
4780 /* If a counter was already enabled then don't need to add again */
4781 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4782 ret = ICE_ERR_ALREADY_EXISTS;
4786 /* Allocate a hardware table entry to VLAN counter */
4787 ret = ice_alloc_vlan_res_counter(hw, &counter_id)
4791 /* Allocate a hardware table entry to hold large act. Two actions for
4792 * counter based large action
4794 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4798 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4801 /* Update the switch rule to add the counter action */
4802 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4804 ice_release_lock(rule_lock);
4809 ice_release_lock(rule_lock);
4810 /* only remove entry if it did not exist previously */
4812 ret = ice_remove_mac(hw, &l_head);
4817 /* This is mapping table entry that maps every word within a given protocol
4818 * structure to the real byte offset as per the specification of that
4820 * for example dst address is 3 words in the ethernet header and corresponding
4821 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4822 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4823 * matching entry describing its field. This needs to be updated if new
4824 * structure is added to that union.
4826 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
/* Each entry lists, per protocol, the byte offset of every extractable
 * 16-bit word in that header (see the block comment above this table).
 * Tunnel entries (VXLAN/GENEVE/VXLAN_GPE/GTP) start at offset 8 -
 * presumably past a fixed leading portion of the header; confirm
 * against the matching packet templates before relying on it. */
4827 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4828 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4829 { ICE_ETYPE_OL, { 0 } },
4830 { ICE_VLAN_OFOS, { 0, 2 } },
4831 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4832 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4833 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4834 26, 28, 30, 32, 34, 36, 38 } },
4835 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4836 26, 28, 30, 32, 34, 36, 38 } },
4837 { ICE_TCP_IL, { 0, 2 } },
4838 { ICE_UDP_OF, { 0, 2 } },
4839 { ICE_UDP_ILOS, { 0, 2 } },
4840 { ICE_SCTP_IL, { 0, 2 } },
4841 { ICE_VXLAN, { 8, 10, 12, 14 } },
4842 { ICE_GENEVE, { 8, 10, 12, 14 } },
4843 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4844 { ICE_NVGRE, { 0, 2, 4, 6 } },
4845 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4846 { ICE_PPPOE, { 0, 2, 4, 6 } },
4849 /* The following table describes preferred grouping of recipes.
4850 * If a recipe that needs to be programmed is a superset or matches one of the
4851 * following combinations, then the recipe needs to be chained as per the
4855 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
/* Maps the SW protocol enum to the HW protocol ID used in field
 * vectors. Note several UDP-based tunnel types intentionally share
 * ICE_UDP_OF_HW, and NVGRE maps to the GRE HW ID. */
4856 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4857 { ICE_MAC_IL, ICE_MAC_IL_HW },
4858 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4859 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4860 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4861 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4862 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4863 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4864 { ICE_TCP_IL, ICE_TCP_IL_HW },
4865 { ICE_UDP_OF, ICE_UDP_OF_HW },
4866 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4867 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4868 { ICE_VXLAN, ICE_UDP_OF_HW },
4869 { ICE_GENEVE, ICE_UDP_OF_HW },
4870 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4871 { ICE_NVGRE, ICE_GRE_OF_HW },
4872 { ICE_GTP, ICE_UDP_OF_HW },
4873 { ICE_PPPOE, ICE_PPPOE_HW },
4877 * ice_find_recp - find a recipe
4878 * @hw: pointer to the hardware structure
4879 * @lkup_exts: extension sequence to match
4881 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4883 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4885 bool refresh_required = true;
4886 struct ice_sw_recipe *recp;
4889 /* Walk through existing recipes to find a match */
4890 recp = hw->switch_info->recp_list;
4891 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4892 /* If recipe was not created for this ID, in SW bookkeeping,
4893 * check if FW has an entry for this recipe. If the FW has an
4894 * entry update it in our SW bookkeeping and continue with the
4897 if (!recp[i].recp_created)
4898 if (ice_get_recp_frm_fw(hw,
4899 hw->switch_info->recp_list, i,
4903 /* Skip inverse action recipes */
4904 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4905 ICE_AQ_RECIPE_ACT_INV_ACT)
4908 /* if number of words we are looking for match */
4909 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4910 struct ice_fv_word *a = lkup_exts->fv_words;
4911 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive set comparison: every requested
 * (prot_id, off) word must appear somewhere in the
 * recipe's word list; equal counts make it a match. */
4915 for (p = 0; p < lkup_exts->n_val_words; p++) {
4916 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4918 if (a[p].off == b[q].off &&
4919 a[p].prot_id == b[q].prot_id)
4920 /* Found the "p"th word in the
4925 /* After walking through all the words in the
4926 * "i"th recipe if "p"th word was not found then
4927 * this recipe is not what we are looking for.
4928 * So break out from this loop and try the next
4931 if (q >= recp[i].lkup_exts.n_val_words) {
4936 /* If for "i"th recipe the found was never set to false
4937 * then it means we found our match
4940 return i; /* Return the recipe ID */
/* No recipe matched the requested extraction set. */
4943 return ICE_MAX_NUM_RECIPES;
4947 * ice_prot_type_to_id - get protocol ID from protocol type
4948 * @type: protocol type
4949 * @id: pointer to variable that will receive the ID
4951 * Returns true if found, false otherwise
4953 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the SW-to-HW protocol ID mapping table. */
4957 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4958 if (ice_prot_id_tbl[i].type == type) {
4959 *id = ice_prot_id_tbl[i].protocol_id;
4966 * ice_fill_valid_words - count valid words
4967 * @rule: advanced rule with lookup information
4968 * @lkup_exts: byte offset extractions of the words that are valid
4970 * calculate valid words in a lookup rule using mask value
4973 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4974 struct ice_prot_lkup_ext *lkup_exts)
/* Validate the protocol type first; prot_id itself is only used for
 * this early-out - the stored prot_id below is re-read from the table. */
4980 if (!ice_prot_type_to_id(rule->type, &prot_id))
4983 word = lkup_exts->n_val_words;
/* View the match mask as an array of u16 words; every non-zero word
 * contributes one extraction entry.
 * NOTE(review): the (u16 *)&rule->m_u cast is a strict-aliasing /
 * endianness hazard - verify m_u is layout-compatible with u16[]. */
4985 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4986 if (((u16 *)&rule->m_u)[j] &&
4987 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4988 /* No more space to accommodate */
4989 if (word >= ICE_MAX_CHAIN_WORDS)
4991 lkup_exts->fv_words[word].off =
4992 ice_prot_ext[rule->type].offs[j];
4993 lkup_exts->fv_words[word].prot_id =
4994 ice_prot_id_tbl[rule->type].protocol_id;
4995 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return the number of words added by this rule. */
4999 ret_val = word - lkup_exts->n_val_words;
5000 lkup_exts->n_val_words = word;
5006 * ice_create_first_fit_recp_def - Create a recipe grouping
5007 * @hw: pointer to the hardware structure
5008 * @lkup_exts: an array of protocol header extractions
5009 * @rg_list: pointer to a list that stores new recipe groups
5010 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5012 * Using first fit algorithm, take all the words that are still not done
5013 * and start grouping them in 4-word groups. Each group makes up one
5016 static enum ice_status
5017 ice_create_first_fit_recp_def(struct ice_hw *hw,
5018 struct ice_prot_lkup_ext *lkup_exts,
5019 struct LIST_HEAD_TYPE *rg_list,
5022 struct ice_pref_recipe_group *grp = NULL;
5027 /* Walk through every word in the rule to check if it is not done. If so
5028 * then this word needs to be part of a new recipe.
5030 for (j = 0; j < lkup_exts->n_val_words; j++)
5031 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one is
 * full (ICE_NUM_WORDS_RECIPE words per recipe). */
5033 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5034 struct ice_recp_grp_entry *entry;
5036 entry = (struct ice_recp_grp_entry *)
5037 ice_malloc(hw, sizeof(*entry));
5039 return ICE_ERR_NO_MEMORY;
5040 LIST_ADD(&entry->l_entry, rg_list);
5041 grp = &entry->r_group;
/* Append this word's (prot_id, off, mask) triple to the group. */
5045 grp->pairs[grp->n_val_pairs].prot_id =
5046 lkup_exts->fv_words[j].prot_id;
5047 grp->pairs[grp->n_val_pairs].off =
5048 lkup_exts->fv_words[j].off;
5049 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5057 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5058 * @hw: pointer to the hardware structure
5059 * @fv_list: field vector with the extraction sequence information
5060 * @rg_list: recipe groupings with protocol-offset pairs
5062 * Helper function to fill in the field vector indices for protocol-offset
5063 * pairs. These indexes are then ultimately programmed into a recipe.
5065 static enum ice_status
5066 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5067 struct LIST_HEAD_TYPE *rg_list)
5069 struct ice_sw_fv_list_entry *fv;
5070 struct ice_recp_grp_entry *rg;
5071 struct ice_fv_word *fv_ext;
5073 if (LIST_EMPTY(fv_list))
/* Use the first field vector as the representative extraction
 * sequence for resolving (prot_id, off) pairs to FV indices. */
5076 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5077 fv_ext = fv->fv_ptr->ew;
5079 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5082 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5083 struct ice_fv_word *pr;
5088 pr = &rg->r_group.pairs[i];
5089 mask = rg->r_group.mask[i];
5091 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5092 if (fv_ext[j].prot_id == pr->prot_id &&
5093 fv_ext[j].off == pr->off) {
5096 /* Store index of field vector */
5098 /* Mask is given by caller as big
5099 * endian, but sent to FW as little
/* 16-bit byte swap of the mask for FW consumption. */
5102 rg->fv_mask[i] = mask << 8 | mask >> 8;
5106 /* Protocol/offset could not be found, caller gave an
5110 return ICE_ERR_PARAM;
5118 * ice_find_free_recp_res_idx - find free result indexes for recipe
5119 * @hw: pointer to hardware structure
5120 * @profiles: bitmap of profiles that will be associated with the new recipe
5121 * @free_idx: pointer to variable to receive the free index bitmap
5123 * The algorithm used here is:
5124 * 1. When creating a new recipe, create a set P which contains all
5125 * Profiles that will be associated with our new recipe
5127 * 2. For each Profile p in set P:
5128 * a. Add all recipes associated with Profile p into set R
5129 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5130 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5131 * i. Or just assume they all have the same possible indexes:
5133 * i.e., PossibleIndexes = 0x0000F00000000000
5135 * 3. For each Recipe r in set R:
5136 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5137 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5139 * FreeIndexes will contain the bits indicating the indexes free for use,
5140 * then the code needs to update the recipe[r].used_result_idx_bits to
5141 * indicate which indexes were selected for use by this recipe.
5144 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5145 ice_bitmap_t *free_idx)
5147 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5148 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5149 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5153 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5154 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5155 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5156 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible" and narrow down per profile. */
5158 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5159 ice_set_bit(count, possible_idx);
5161 /* For each profile we are going to associate the recipe with, add the
5162 * recipes that are associated with that profile. This will give us
5163 * the set of recipes that our recipe may collide with. Also, determine
5164 * what possible result indexes are usable given this set of profiles.
5167 while (ICE_MAX_NUM_PROFILES >
5168 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5169 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5170 ICE_MAX_NUM_RECIPES);
5171 ice_and_bitmap(possible_idx, possible_idx,
5172 hw->switch_info->prof_res_bm[bit],
5177 /* For each recipe that our new recipe may collide with, determine
5178 * which indexes have been used.
5180 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5181 if (ice_is_bit_set(recipes, bit)) {
5182 ice_or_bitmap(used_idx, used_idx,
5183 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here). */
5187 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5189 /* return number of free indexes */
5192 while (ICE_MAX_FV_WORDS >
5193 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5202 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5203 * @hw: pointer to hardware structure
5204 * @rm: recipe management list entry
5205 * @match_tun: if field vector index for tunnel needs to be programmed
5206 * @profiles: bitmap of profiles that will be associated.
5208 static enum ice_status
5209 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5210 bool match_tun, ice_bitmap_t *profiles)
5212 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5213 struct ice_aqc_recipe_data_elem *tmp;
5214 struct ice_aqc_recipe_data_elem *buf;
5215 struct ice_recp_grp_entry *entry;
5216 enum ice_status status;
5222 /* When more than one recipe are required, another recipe is needed to
5223 * chain them together. Matching a tunnel metadata ID takes up one of
5224 * the match fields in the chaining recipe reducing the number of
5225 * chained recipes by one.
5227 /* check number of free result indices */
5228 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5229 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5231 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5232 free_res_idx, rm->n_grp_count);
5234 if (rm->n_grp_count > 1) {
5235 if (rm->n_grp_count > free_res_idx)
5236 return ICE_ERR_MAX_LIMIT;
/* tmp holds a FW-side recipe snapshot used as a template below. */
5241 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5242 ICE_MAX_NUM_RECIPES,
5245 return ICE_ERR_NO_MEMORY;
5247 buf = (struct ice_aqc_recipe_data_elem *)
5248 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5250 status = ICE_ERR_NO_MEMORY;
5254 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5255 recipe_count = ICE_MAX_NUM_RECIPES;
5256 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5258 if (status || recipe_count == 0)
5261 /* Allocate the recipe resources, and configure them according to the
5262 * match fields from protocol headers and extracted field vectors.
5264 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5265 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5268 status = ice_alloc_recipe(hw, &entry->rid);
5272 /* Clear the result index of the located recipe, as this will be
5273 * updated, if needed, later in the recipe creation process.
5275 tmp[0].content.result_indx = 0;
5277 buf[recps] = tmp[0];
5278 buf[recps].recipe_indx = (u8)entry->rid;
5279 /* if the recipe is a non-root recipe RID should be programmed
5280 * as 0 for the rules to be applied correctly.
5282 buf[recps].content.rid = 0;
5283 ice_memset(&buf[recps].content.lkup_indx, 0,
5284 sizeof(buf[recps].content.lkup_indx),
5287 /* All recipes use look-up index 0 to match switch ID. */
5288 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5289 buf[recps].content.mask[0] =
5290 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5291 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5294 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5295 buf[recps].content.lkup_indx[i] = 0x80;
5296 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore defaults with this group's real FV
 * indices and byte-swapped masks (slots 1..n_val_pairs). */
5299 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5300 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5301 buf[recps].content.mask[i + 1] =
5302 CPU_TO_LE16(entry->fv_mask[i]);
5305 if (rm->n_grp_count > 1) {
5306 /* Checks to see if there really is a valid result index
5309 if (chain_idx >= ICE_MAX_FV_WORDS) {
5310 ice_debug(hw, ICE_DBG_SW,
5311 "No chain index available\n");
5312 status = ICE_ERR_MAX_LIMIT;
5316 entry->chain_idx = chain_idx;
5317 buf[recps].content.result_indx =
5318 ICE_AQ_RECIPE_RESULT_EN |
5319 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5320 ICE_AQ_RECIPE_RESULT_DATA_M);
/* Consume this result index and advance to the next free one. */
5321 ice_clear_bit(chain_idx, result_idx_bm);
5322 chain_idx = ice_find_first_bit(result_idx_bm,
5326 /* fill recipe dependencies */
5327 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5328 ICE_MAX_NUM_RECIPES);
5329 ice_set_bit(buf[recps].recipe_indx,
5330 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5331 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5335 if (rm->n_grp_count == 1) {
/* Single-group case: the one recipe is itself the root. */
5336 rm->root_rid = buf[0].recipe_indx;
5337 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5338 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5339 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5340 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5341 sizeof(buf[0].recipe_bitmap),
5342 ICE_NONDMA_TO_NONDMA);
5344 status = ICE_ERR_BAD_PTR;
5347 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5348 * the recipe which is getting created if specified
5349 * by user. Usually any advanced switch filter, which results
5350 * into new extraction sequence, ended up creating a new recipe
5351 * of type ROOT and usually recipes are associated with profiles
5352 * Switch rule referring newly created recipe, needs to have
5353 * either/or 'fwd' or 'join' priority, otherwise switch rule
5354 * evaluation will not happen correctly. In other words, if
5355 * switch rule to be evaluated on priority basis, then recipe
5356 * needs to have priority, otherwise it will be evaluated last.
5358 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5360 struct ice_recp_grp_entry *last_chain_entry;
5363 /* Allocate the last recipe that will chain the outcomes of the
5364 * other recipes together
5366 status = ice_alloc_recipe(hw, &rid);
5370 buf[recps].recipe_indx = (u8)rid;
5371 buf[recps].content.rid = (u8)rid;
5372 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5373 /* the new entry created should also be part of rg_list to
5374 * make sure we have complete recipe
5376 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5377 sizeof(*last_chain_entry));
5378 if (!last_chain_entry) {
5379 status = ICE_ERR_NO_MEMORY;
5382 last_chain_entry->rid = rid;
5383 ice_memset(&buf[recps].content.lkup_indx, 0,
5384 sizeof(buf[recps].content.lkup_indx),
5386 /* All recipes use look-up index 0 to match switch ID. */
5387 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5388 buf[recps].content.mask[0] =
5389 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5390 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5391 buf[recps].content.lkup_indx[i] =
5392 ICE_AQ_RECIPE_LKUP_IGNORE;
5393 buf[recps].content.mask[i] = 0;
5397 /* update r_bitmap with the recp that is used for chaining */
5398 ice_set_bit(rid, rm->r_bitmap);
5399 /* this is the recipe that chains all the other recipes so it
5400 * should not have a chaining ID to indicate the same
5402 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches on each sub-recipe's result
 * (chain) index with a full 0xFFFF mask. */
5403 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5405 last_chain_entry->fv_idx[i] = entry->chain_idx;
5406 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5407 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5408 ice_set_bit(entry->rid, rm->r_bitmap);
5410 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5411 if (sizeof(buf[recps].recipe_bitmap) >=
5412 sizeof(rm->r_bitmap)) {
5413 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5414 sizeof(buf[recps].recipe_bitmap),
5415 ICE_NONDMA_TO_NONDMA);
5417 status = ICE_ERR_BAD_PTR;
5420 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5422 /* To differentiate among different UDP tunnels, a meta data ID
/* Tunnel matching consumes one extra lookup slot in the
 * chaining recipe (the metadata flag word). */
5426 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5427 buf[recps].content.mask[i] =
5428 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5432 rm->root_rid = (u8)rid;
/* Program the assembled recipes into FW under the change lock. */
5434 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5438 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5439 ice_release_change_lock(hw);
5443 /* Every recipe that just got created add it to the recipe
5446 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5447 struct ice_switch_info *sw = hw->switch_info;
5448 bool is_root, idx_found = false;
5449 struct ice_sw_recipe *recp;
5450 u16 idx, buf_idx = 0;
5452 /* find buffer index for copying some data */
5453 for (idx = 0; idx < rm->n_grp_count; idx++)
5454 if (buf[idx].recipe_indx == entry->rid) {
5460 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping. */
5464 recp = &sw->recp_list[entry->rid];
5465 is_root = (rm->root_rid == entry->rid);
5466 recp->is_root = is_root;
5468 recp->root_rid = entry->rid;
5469 recp->big_recp = (is_root && rm->n_grp_count > 1);
5471 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5472 entry->r_group.n_val_pairs *
5473 sizeof(struct ice_fv_word),
5474 ICE_NONDMA_TO_NONDMA);
5476 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5477 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5479 /* Copy non-result fv index values and masks to recipe. This
5480 * call will also update the result recipe bitmask.
5482 ice_collect_result_idx(&buf[buf_idx], recp);
5484 /* for non-root recipes, also copy to the root, this allows
5485 * easier matching of a complete chained recipe
5488 ice_collect_result_idx(&buf[buf_idx],
5489 &sw->recp_list[rm->root_rid]);
5491 recp->n_ext_words = entry->r_group.n_val_pairs;
5492 recp->chain_idx = entry->chain_idx;
5493 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5494 recp->n_grp_count = rm->n_grp_count;
5495 recp->tun_type = rm->tun_type;
5496 recp->recp_created = true;
5511 * ice_create_recipe_group - creates recipe group
5512 * @hw: pointer to hardware structure
5513 * @rm: recipe management list entry
5514 * @lkup_exts: lookup elements
5516 static enum ice_status
5517 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5518 struct ice_prot_lkup_ext *lkup_exts)
5520 enum ice_status status;
5523 rm->n_grp_count = 0;
5525 /* Create recipes for words that are marked not done by packing them
5528 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5529 &rm->rg_list, &recp_count);
/* On success, record the group count and copy the full extraction
 * word list and masks into the recipe management entry. */
5531 rm->n_grp_count += recp_count;
5532 rm->n_ext_words = lkup_exts->n_val_words;
5533 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5534 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5535 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5536 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5543 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5544 * @hw: pointer to hardware structure
5545 * @lkups: lookup elements or match criteria for the advanced recipe, one
5546 * structure per protocol header
5547 * @lkups_cnt: number of protocols
5548 * @bm: bitmap of field vectors to consider
5549 * @fv_list: pointer to a list that holds the returned field vectors
5551 static enum ice_status
5552 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5553 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5555 enum ice_status status;
/* Build a temporary array of HW protocol IDs, one per lookup. */
5559 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5561 return ICE_ERR_NO_MEMORY;
5563 for (i = 0; i < lkups_cnt; i++)
5564 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5565 status = ICE_ERR_CFG;
5569 /* Find field vectors that include all specified protocol types */
5570 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is only needed for the query; free it on all paths. */
5573 ice_free(hw, prot_ids);
5578 * ice_add_special_words - Add words that are not protocols, such as metadata
5579 * @rinfo: other information regarding the rule e.g. priority and action info
5580 * @lkup_exts: lookup word structure
5582 static enum ice_status
5583 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5584 struct ice_prot_lkup_ext *lkup_exts)
5586 /* If this is a tunneled packet, then add recipe index to match the
5587 * tunnel bit in the packet metadata flags.
5589 if (rinfo->tun_type != ICE_NON_TUN) {
5590 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Append one metadata word: tunnel-flag MDID offset and mask. */
5591 u8 word = lkup_exts->n_val_words++;
5593 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5594 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5596 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room left for the metadata word. */
5598 return ICE_ERR_MAX_LIMIT;
5605 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5606 * @hw: pointer to hardware structure
5607 * @rinfo: other information regarding the rule e.g. priority and action info
5608 * @bm: pointer to memory for returning the bitmap of field vectors
5611 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5614 enum ice_prof_type type;
/* Translate the rule's tunnel type into a profile class, then fetch
 * the bitmap of field vectors compatible with that class. */
5616 switch (rinfo->tun_type) {
5618 type = ICE_PROF_NON_TUN;
5620 case ICE_ALL_TUNNELS:
5621 type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel flavors share the UDP profile class. */
5623 case ICE_SW_TUN_VXLAN_GPE:
5624 case ICE_SW_TUN_GENEVE:
5625 case ICE_SW_TUN_VXLAN:
5626 case ICE_SW_TUN_UDP:
5627 case ICE_SW_TUN_GTP:
5628 type = ICE_PROF_TUN_UDP;
5630 case ICE_SW_TUN_NVGRE:
5631 type = ICE_PROF_TUN_GRE;
5633 case ICE_SW_TUN_PPPOE:
5634 type = ICE_PROF_TUN_PPPOE;
5636 case ICE_SW_TUN_AND_NON_TUN:
5638 type = ICE_PROF_ALL;
5642 ice_get_sw_fv_bitmap(hw, type, bm);
5646 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5647 * @hw: pointer to hardware structure
5648 * @lkups: lookup elements or match criteria for the advanced recipe, one
5649 * structure per protocol header
5650 * @lkups_cnt: number of protocols
5651 * @rinfo: other information regarding the rule e.g. priority and action info
5652 * @rid: return the recipe ID of the recipe created
/* NOTE(review): decimated listing — opening brace, several local
 * declarations (i, j), goto labels and intermediate error checks are not
 * visible. High-level flow as shown: validate lookups, gather compatible
 * field vectors, group words into recipes, reuse an existing recipe if
 * one matches, otherwise program a new one and update the
 * profile<->recipe association bitmaps.
 */
5654 static enum ice_status
5655 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5656 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5658 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5659 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5660 struct ice_prot_lkup_ext *lkup_exts;
5661 struct ice_recp_grp_entry *r_entry;
5662 struct ice_sw_fv_list_entry *fvit;
5663 struct ice_recp_grp_entry *r_tmp;
5664 struct ice_sw_fv_list_entry *tmp;
5665 enum ice_status status = ICE_SUCCESS;
5666 struct ice_sw_recipe *rm;
5667 bool match_tun = false;
5671 return ICE_ERR_PARAM;
5673 lkup_exts = (struct ice_prot_lkup_ext *)
5674 ice_malloc(hw, sizeof(*lkup_exts));
5676 return ICE_ERR_NO_MEMORY;
5678 /* Determine the number of words to be matched and if it exceeds a
5679 * recipe's restrictions
5681 for (i = 0; i < lkups_cnt; i++) {
5684 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5685 status = ICE_ERR_CFG;
5686 goto err_free_lkup_exts;
5689 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5691 status = ICE_ERR_CFG;
5692 goto err_free_lkup_exts;
5696 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5698 status = ICE_ERR_NO_MEMORY;
5699 goto err_free_lkup_exts;
5702 /* Get field vectors that contain fields extracted from all the protocol
5703 * headers being programmed.
5705 INIT_LIST_HEAD(&rm->fv_list);
5706 INIT_LIST_HEAD(&rm->rg_list);
5708 /* Get bitmap of field vectors (profiles) that are compatible with the
5709 * rule request; only these will be searched in the subsequent call to
5712 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5714 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5718 /* Group match words into recipes using preferred recipe grouping
5721 status = ice_create_recipe_group(hw, rm, lkup_exts);
5725 /* There is only profile for UDP tunnels. So, it is necessary to use a
5726 * metadata ID flag to differentiate different tunnel types. A separate
5727 * recipe needs to be used for the metadata.
5729 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5730 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5731 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5734 /* set the recipe priority if specified */
5735 rm->priority = rinfo->priority ? rinfo->priority : 0;
5737 /* Find offsets from the field vector. Pick the first one for all the
5740 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5744 /* get bitmap of all profiles the recipe will be associated with */
5745 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5746 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5748 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5749 ice_set_bit((u16)fvit->profile_id, profiles);
5752 /* Create any special protocol/offset pairs, such as looking at tunnel
5753 * bits by extracting metadata
5755 status = ice_add_special_words(rinfo, lkup_exts);
5757 goto err_free_lkup_exts;
5759 /* Look for a recipe which matches our requested fv / mask list */
5760 *rid = ice_find_recp(hw, lkup_exts);
5761 if (*rid < ICE_MAX_NUM_RECIPES)
5762 /* Success if found a recipe that match the existing criteria */
5765 /* Recipe we need does not exist, add a recipe */
5766 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5770 /* Associate all the recipes created with all the profiles in the
5771 * common field vector.
5773 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5775 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5778 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5779 (u8 *)r_bitmap, NULL);
/* Merge the newly-created recipe bits into the profile's existing
 * recipe association before writing it back.
 */
5783 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5784 ICE_MAX_NUM_RECIPES);
5785 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5789 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5792 ice_release_change_lock(hw);
5797 /* Update profile to recipe bitmap array */
5798 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5799 ICE_MAX_NUM_RECIPES);
5801 /* Update recipe to profile bitmap array */
5802 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5803 if (ice_is_bit_set(r_bitmap, j))
5804 ice_set_bit((u16)fvit->profile_id,
5805 recipe_to_profile[j]);
/* Publish the root recipe ID and cache the lookup extraction info
 * so future ice_find_recp() calls can match this recipe.
 */
5808 *rid = rm->root_rid;
5809 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5810 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release the temporary recipe-group and fv lists. */
5812 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5813 ice_recp_grp_entry, l_entry) {
5814 LIST_DEL(&r_entry->l_entry);
5815 ice_free(hw, r_entry);
5818 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5820 LIST_DEL(&fvit->list_entry);
5825 ice_free(hw, rm->root_buf);
5830 ice_free(hw, lkup_exts);
5836 * ice_find_dummy_packet - find dummy packet by tunnel type
5838 * @lkups: lookup elements or match criteria for the advanced recipe, one
5839 * structure per protocol header
5840 * @lkups_cnt: number of protocols
5841 * @tun_type: tunnel type from the match criteria
5842 * @pkt: dummy packet to fill according to filter match criteria
5843 * @pkt_len: packet length of dummy packet
5844 * @offsets: pointer to receive the pointer to the offsets for the packet
/* NOTE(review): decimated listing — the return-type line, loop-variable
 * declaration, several "return" statements after the early tunnel cases
 * and the inner tcp/vlan branch conditions are not visible. The visible
 * logic selects one of the file's dummy packet templates based first on
 * tunnel type, then on the L3/L4/VLAN flags derived from the lookups.
 */
5847 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5848 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5850 const struct ice_dummy_pkt_offsets **offsets)
5852 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* GTP and PPPoE have dedicated templates — no flag scan needed. */
5855 if (tun_type == ICE_SW_TUN_GTP) {
5856 *pkt = dummy_udp_gtp_packet;
5857 *pkt_len = sizeof(dummy_udp_gtp_packet);
5858 *offsets = dummy_udp_gtp_packet_offsets;
5861 if (tun_type == ICE_SW_TUN_PPPOE) {
5862 *pkt = dummy_pppoe_packet;
5863 *pkt_len = sizeof(dummy_pppoe_packet);
5864 *offsets = dummy_pppoe_packet_offsets;
/* Derive protocol flags from the caller's lookup elements. */
5867 for (i = 0; i < lkups_cnt; i++) {
5868 if (lkups[i].type == ICE_UDP_ILOS)
5870 else if (lkups[i].type == ICE_TCP_IL)
5872 else if (lkups[i].type == ICE_IPV6_OFOS)
5874 else if (lkups[i].type == ICE_VLAN_OFOS)
5878 if (tun_type == ICE_ALL_TUNNELS) {
5879 *pkt = dummy_gre_udp_packet;
5880 *pkt_len = sizeof(dummy_gre_udp_packet);
5881 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: inner TCP vs inner UDP template (condition line missing). */
5885 if (tun_type == ICE_SW_TUN_NVGRE) {
5887 *pkt = dummy_gre_tcp_packet;
5888 *pkt_len = sizeof(dummy_gre_tcp_packet);
5889 *offsets = dummy_gre_tcp_packet_offsets;
5893 *pkt = dummy_gre_udp_packet;
5894 *pkt_len = sizeof(dummy_gre_udp_packet);
5895 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-encapsulated tunnels: inner TCP vs inner UDP template. */
5899 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5900 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5902 *pkt = dummy_udp_tun_tcp_packet;
5903 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5904 *offsets = dummy_udp_tun_tcp_packet_offsets;
5908 *pkt = dummy_udp_tun_udp_packet;
5909 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5910 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled selection: UDP/IPv4 (with or without VLAN)... */
5916 *pkt = dummy_vlan_udp_packet;
5917 *pkt_len = sizeof(dummy_vlan_udp_packet);
5918 *offsets = dummy_vlan_udp_packet_offsets;
5921 *pkt = dummy_udp_packet;
5922 *pkt_len = sizeof(dummy_udp_packet);
5923 *offsets = dummy_udp_packet_offsets;
5925 } else if (udp && ipv6) {
5927 *pkt = dummy_vlan_udp_ipv6_packet;
5928 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5929 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5932 *pkt = dummy_udp_ipv6_packet;
5933 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5934 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is equivalent to plain "ipv6";
 * the redundant first term appears intentional upstream — confirm
 * against the base driver before simplifying.
 */
5936 } else if ((tcp && ipv6) || ipv6) {
5938 *pkt = dummy_vlan_tcp_ipv6_packet;
5939 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
5940 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
5943 *pkt = dummy_tcp_ipv6_packet;
5944 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5945 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default fallthrough: IPv4 TCP template (with or without VLAN). */
5950 *pkt = dummy_vlan_tcp_packet;
5951 *pkt_len = sizeof(dummy_vlan_tcp_packet);
5952 *offsets = dummy_vlan_tcp_packet_offsets;
5954 *pkt = dummy_tcp_packet;
5955 *pkt_len = sizeof(dummy_tcp_packet);
5956 *offsets = dummy_tcp_packet_offsets;
5961 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5963 * @lkups: lookup elements or match criteria for the advanced recipe, one
5964 * structure per protocol header
5965 * @lkups_cnt: number of protocols
5966 * @s_rule: stores rule information from the match criteria
5967 * @dummy_pkt: dummy packet to fill according to filter match criteria
5968 * @pkt_len: packet length of dummy packet
5969 * @offsets: offset info for the dummy packet
/* NOTE(review): decimated listing — opening brace, the "u8 *pkt"
 * declaration, the switch's case labels and break statements, and the
 * found/not-found check after the offset scan are not visible.
 */
5971 static enum ice_status
5972 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5973 struct ice_aqc_sw_rules_elem *s_rule,
5974 const u8 *dummy_pkt, u16 pkt_len,
5975 const struct ice_dummy_pkt_offsets *offsets)
5980 /* Start with a packet with a pre-defined/dummy content. Then, fill
5981 * in the header values to be looked up or matched.
5983 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5985 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5987 for (i = 0; i < lkups_cnt; i++) {
5988 enum ice_protocol_type type;
5989 u16 offset = 0, len = 0, j;
5992 /* find the start of this layer; it should be found since this
5993 * was already checked when search for the dummy packet
5995 type = lkups[i].type;
5996 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5997 if (type == offsets[j].type) {
5998 offset = offsets[j].offset;
6003 /* this should never happen in a correct calling sequence */
6005 return ICE_ERR_PARAM;
/* Map lookup type to the header length to be overlaid (case
 * labels are missing from this view).
 */
6007 switch (lkups[i].type) {
6010 len = sizeof(struct ice_ether_hdr);
6013 len = sizeof(struct ice_ethtype_hdr);
6016 len = sizeof(struct ice_vlan_hdr);
6020 len = sizeof(struct ice_ipv4_hdr);
6024 len = sizeof(struct ice_ipv6_hdr);
6029 len = sizeof(struct ice_l4_hdr);
6032 len = sizeof(struct ice_sctp_hdr);
6035 len = sizeof(struct ice_nvgre);
6040 len = sizeof(struct ice_udp_tnl_hdr);
6044 len = sizeof(struct ice_udp_gtp_hdr);
6047 len = sizeof(struct ice_pppoe_hdr);
6050 return ICE_ERR_PARAM;
6053 /* the length should be a word multiple */
6054 if (len % ICE_BYTES_PER_WORD)
6057 /* We have the offset to the header start, the length, the
6058 * caller's header values and mask. Use this information to
6059 * copy the data into the dummy packet appropriately based on
6060 * the mask. Note that we need to only write the bits as
6061 * indicated by the mask to make sure we don't improperly write
6062 * over any significant packet data.
/* Per 16-bit word: keep the template bits where the mask is 0,
 * take the caller's header bits where the mask is 1.
 */
6064 for (j = 0; j < len / sizeof(u16); j++)
6065 if (((u16 *)&lkups[i].m_u)[j])
6066 ((u16 *)(pkt + offset))[j] =
6067 (((u16 *)(pkt + offset))[j] &
6068 ~((u16 *)&lkups[i].m_u)[j]) |
6069 (((u16 *)&lkups[i].h_u)[j] &
6070 ((u16 *)&lkups[i].m_u)[j]);
6073 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6079 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6080 * @hw: pointer to the hardware structure
6081 * @tun_type: tunnel type
6082 * @pkt: dummy packet to fill in
6083 * @offsets: offset info for the dummy packet
/* NOTE(review): decimated listing — the opening brace, the open_port/i
 * declarations, the switch statement line, break/return statements and
 * the final return are not visible. Visible logic: obtain the currently
 * open VXLAN/GENEVE UDP port, then patch it into the template's outer
 * UDP destination port.
 */
6085 static enum ice_status
6086 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6087 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6092 case ICE_SW_TUN_AND_NON_TUN:
6093 case ICE_SW_TUN_VXLAN_GPE:
6094 case ICE_SW_TUN_VXLAN:
6095 case ICE_SW_TUN_UDP:
6096 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6100 case ICE_SW_TUN_GENEVE:
6101 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6106 /* Nothing needs to be done for this tunnel type */
6110 /* Find the outer UDP protocol header and insert the port number */
6111 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6112 if (offsets[i].type == ICE_UDP_OF) {
6113 struct ice_l4_hdr *hdr;
6116 offset = offsets[i].offset;
6117 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Dummy packet bytes are network order, hence CPU_TO_BE16. */
6118 hdr->dst_port = CPU_TO_BE16(open_port);
6128 * ice_find_adv_rule_entry - Search a rule entry
6129 * @hw: pointer to the hardware structure
6130 * @lkups: lookup elements or match criteria for the advanced recipe, one
6131 * structure per protocol header
6132 * @lkups_cnt: number of protocols
6133 * @recp_id: recipe ID for which we are finding the rule
6134 * @rinfo: other information regarding the rule e.g. priority and action info
6136 * Helper function to search for a given advance rule entry
6137 * Returns pointer to entry storing the rule if found
/* NOTE(review): decimated listing — opening brace, loop-variable
 * declaration, the memcmp size argument, the break after a mismatch, the
 * matched-entry return and the final NULL return are not visible.
 */
6139 static struct ice_adv_fltr_mgmt_list_entry *
6140 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6141 u16 lkups_cnt, u8 recp_id,
6142 struct ice_adv_rule_info *rinfo)
6144 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6145 struct ice_switch_info *sw = hw->switch_info;
6148 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6149 ice_adv_fltr_mgmt_list_entry, list_entry) {
6150 bool lkups_matched = true;
/* A candidate must have the same lookup count, identical lookup
 * elements, and matching flag/tunnel-type in its rule info.
 */
6152 if (lkups_cnt != list_itr->lkups_cnt)
6154 for (i = 0; i < list_itr->lkups_cnt; i++)
6155 if (memcmp(&list_itr->lkups[i], &lkups[i],
6157 lkups_matched = false;
6160 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6161 rinfo->tun_type == list_itr->rule_info.tun_type &&
6169 * ice_adv_add_update_vsi_list
6170 * @hw: pointer to the hardware structure
6171 * @m_entry: pointer to current adv filter management list entry
6172 * @cur_fltr: filter information from the book keeping entry
6173 * @new_fltr: filter information with the new VSI to be added
6175 * Call AQ command to add or update previously created VSI list with new VSI.
6177 * Helper function to do book keeping associated with adding filter information
6178 * The algorithm to do the booking keeping is described below :
6179 * When a VSI needs to subscribe to a given advanced filter
6180 * if only one VSI has been added till now
6181 * Allocate a new VSI list and add two VSIs
6182 * to this list using switch rule command
6183 * Update the previously created switch rule with the
6184 * newly created VSI list ID
6185 * if a VSI list was previously created
6186 * Add the new VSI to the previously created VSI list set
6187 * using the update switch rule command
/* NOTE(review): decimated listing — opening brace, some status checks
 * after AQ calls, the ice_create_vsi_list_rule trailing arguments and
 * the final return are not visible.
 */
6189 static enum ice_status
6190 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6191 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6192 struct ice_adv_rule_info *cur_fltr,
6193 struct ice_adv_rule_info *new_fltr)
6195 enum ice_status status;
6196 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be demoted into a VSI list. */
6198 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6199 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6200 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6201 return ICE_ERR_NOT_IMPL;
6203 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6204 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6205 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6206 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6207 return ICE_ERR_NOT_IMPL;
6209 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6210 /* Only one entry existed in the mapping and it was not already
6211 * a part of a VSI list. So, create a VSI list with the old and
6214 struct ice_fltr_info tmp_fltr;
6215 u16 vsi_handle_arr[2];
6217 /* A rule already exists with the new VSI being added */
6218 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6219 new_fltr->sw_act.fwd_id.hw_vsi_id)
6220 return ICE_ERR_ALREADY_EXISTS;
6222 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6223 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6224 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* NOTE(review): tmp_fltr appears to be used without a visible
 * memset/zero-init here — the initialization line may be among
 * the missing lines; confirm against the base driver.
 */
6230 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6231 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6232 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6233 /* Update the previous switch rule of "forward to VSI" to
6236 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6240 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6241 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6242 m_entry->vsi_list_info =
6243 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6246 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6248 if (!m_entry->vsi_list_info)
6251 /* A rule already exists with the new VSI being added */
6252 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6255 /* Update the previously created VSI list set with
6256 * the new VSI ID passed in
6258 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6260 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6262 ice_aqc_opc_update_sw_rules,
6264 /* update VSI list mapping info with new VSI ID */
6266 ice_set_bit(vsi_handle,
6267 m_entry->vsi_list_info->vsi_map);
6270 m_entry->vsi_count++;
6275 * ice_add_adv_rule - helper function to create an advanced switch rule
6276 * @hw: pointer to the hardware structure
6277 * @lkups: information on the words that needs to be looked up. All words
6278 * together makes one recipe
6279 * @lkups_cnt: num of entries in the lkups array
6280 * @rinfo: other information related to the rule that needs to be programmed
6281 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6282 * ignored is case of error.
6284 * This function can program only 1 rule at a time. The lkups is used to
6285 * describe the all the words that forms the "lookup" portion of the recipe.
6286 * These words can span multiple protocols. Callers to this function need to
6287 * pass in a list of protocol headers with lookup information along and mask
6288 * that determines which words are valid from the given protocol header.
6289 * rinfo describes other information related to this rule such as forwarding
6290 * IDs, priority of this rule, etc.
/* NOTE(review): decimated listing — the return-type line, several local
 * declarations (act, q_rgn, word_cnt, j, ptr), status checks, case
 * labels/breaks and the error-path tail are not visible. Flow as shown:
 * validate input, pick a dummy packet, resolve the recipe, reuse an
 * existing rule by updating its VSI list, otherwise build and program a
 * new lookup rule and book-keep it.
 */
6293 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6294 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6295 struct ice_rule_query_data *added_entry)
6297 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6298 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6299 const struct ice_dummy_pkt_offsets *pkt_offsets;
6300 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6301 struct LIST_HEAD_TYPE *rule_head;
6302 struct ice_switch_info *sw;
6303 enum ice_status status;
6304 const u8 *pkt = NULL;
6309 /* Initialize profile to result index bitmap */
6310 if (!hw->switch_info->prof_res_bm_init) {
6311 hw->switch_info->prof_res_bm_init = 1;
6312 ice_init_prof_result_bm(hw);
6316 return ICE_ERR_PARAM;
6318 /* get # of words we need to match */
/* Count non-zero 16-bit mask words across all lookups; a rule with
 * no match words (or too many) is rejected below.
 */
6320 for (i = 0; i < lkups_cnt; i++) {
6323 ptr = (u16 *)&lkups[i].m_u;
6324 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6328 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6329 return ICE_ERR_PARAM;
6331 /* make sure that we can locate a dummy packet */
6332 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6335 status = ICE_ERR_PARAM;
6336 goto err_ice_add_adv_rule;
6339 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6340 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6341 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6342 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6345 vsi_handle = rinfo->sw_act.vsi_handle;
6346 if (!ice_is_vsi_valid(hw, vsi_handle))
6347 return ICE_ERR_PARAM;
6349 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6350 rinfo->sw_act.fwd_id.hw_vsi_id =
6351 ice_get_hw_vsi_num(hw, vsi_handle);
6352 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6353 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6355 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6358 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6360 /* we have to add VSI to VSI_LIST and increment vsi_count.
6361 * Also Update VSI list so that we can change forwarding rule
6362 * if the rule already exists, we will check if it exists with
6363 * same vsi_id, if not then add it to the VSI list if it already
6364 * exists if not then create a VSI list and add the existing VSI
6365 * ID and the new VSI ID to the list
6366 * We will add that VSI to the list
6368 status = ice_adv_add_update_vsi_list(hw, m_entry,
6369 &m_entry->rule_info,
6372 added_entry->rid = rid;
6373 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6374 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No existing rule — build a fresh switch-rule buffer sized for the
 * dummy packet.
 */
6378 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6379 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6381 return ICE_ERR_NO_MEMORY;
6382 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6383 switch (rinfo->sw_act.fltr_act) {
6384 case ICE_FWD_TO_VSI:
6385 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6386 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6387 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6390 act |= ICE_SINGLE_ACT_TO_Q;
6391 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6392 ICE_SINGLE_ACT_Q_INDEX_M;
6394 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as log2 in the Q_REGION field. */
6395 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6396 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6397 act |= ICE_SINGLE_ACT_TO_Q;
6398 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6399 ICE_SINGLE_ACT_Q_INDEX_M;
6400 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6401 ICE_SINGLE_ACT_Q_REGION_M;
6403 case ICE_DROP_PACKET:
6404 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6405 ICE_SINGLE_ACT_VALID_BIT;
6408 status = ICE_ERR_CFG;
6409 goto err_ice_add_adv_rule;
6412 /* set the rule LOOKUP type based on caller specified 'RX'
6413 * instead of hardcoding it to be either LOOKUP_TX/RX
6415 * for 'RX' set the source to be the port number
6416 * for 'TX' set the source to be the source HW VSI number (determined
6420 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6421 s_rule->pdata.lkup_tx_rx.src =
6422 CPU_TO_LE16(hw->port_info->lport);
6424 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6425 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6428 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6429 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6431 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6432 pkt_len, pkt_offsets);
6434 goto err_ice_add_adv_rule;
6436 if (rinfo->tun_type != ICE_NON_TUN &&
6437 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6438 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6439 s_rule->pdata.lkup_tx_rx.hdr,
6442 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue. */
6445 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6446 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6449 goto err_ice_add_adv_rule;
6450 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6451 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6453 status = ICE_ERR_NO_MEMORY;
6454 goto err_ice_add_adv_rule;
6457 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6458 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6459 ICE_NONDMA_TO_NONDMA);
6460 if (!adv_fltr->lkups) {
6461 status = ICE_ERR_NO_MEMORY;
6462 goto err_ice_add_adv_rule;
6465 adv_fltr->lkups_cnt = lkups_cnt;
6466 adv_fltr->rule_info = *rinfo;
6467 adv_fltr->rule_info.fltr_rule_id =
6468 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6469 sw = hw->switch_info;
6470 sw->recp_list[rid].adv_rule = true;
6471 rule_head = &sw->recp_list[rid].filt_rules;
6473 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6474 struct ice_fltr_info tmp_fltr;
6476 tmp_fltr.fltr_rule_id =
6477 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6478 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6479 tmp_fltr.fwd_id.hw_vsi_id =
6480 ice_get_hw_vsi_num(hw, vsi_handle);
6481 tmp_fltr.vsi_handle = vsi_handle;
6482 /* Update the previous switch rule of "forward to VSI" to
6485 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6487 goto err_ice_add_adv_rule;
6488 adv_fltr->vsi_count = 1;
6491 /* Add rule entry to book keeping list */
6492 LIST_ADD(&adv_fltr->list_entry, rule_head);
6494 added_entry->rid = rid;
6495 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6496 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6498 err_ice_add_adv_rule:
/* On failure, free the partially-built management entry. */
6499 if (status && adv_fltr) {
6500 ice_free(hw, adv_fltr->lkups);
6501 ice_free(hw, adv_fltr);
/* s_rule is a scratch AQ buffer — released on all paths. */
6504 ice_free(hw, s_rule);
6510 * ice_adv_rem_update_vsi_list
6511 * @hw: pointer to the hardware structure
6512 * @vsi_handle: VSI handle of the VSI to remove
6513 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): decimated listing — opening brace, vsi_list_id
 * declaration, some status checks after AQ calls and the final return
 * are not visible. Visible logic: detach the VSI from the rule's VSI
 * list, and when only one subscriber remains, demote the rule back to a
 * plain forward-to-VSI action and delete the now-unneeded list.
 */
6516 static enum ice_status
6517 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6518 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6520 struct ice_vsi_list_map_info *vsi_list_info;
6521 enum ice_sw_lkup_type lkup_type;
6522 enum ice_status status;
6525 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6526 fm_list->vsi_count == 0)
6527 return ICE_ERR_PARAM;
6529 /* A rule with the VSI being removed does not exist */
6530 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6531 return ICE_ERR_DOES_NOT_EXIST;
6533 lkup_type = ICE_SW_LKUP_LAST;
6534 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* "true" = remove this VSI from the list (rather than add). */
6535 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6536 ice_aqc_opc_update_sw_rules,
6541 fm_list->vsi_count--;
6542 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6543 vsi_list_info = fm_list->vsi_list_info;
6544 if (fm_list->vsi_count == 1) {
6545 struct ice_fltr_info tmp_fltr;
/* Identify the single remaining subscriber. */
6548 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6550 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6551 return ICE_ERR_OUT_OF_RANGE;
6553 /* Make sure VSI list is empty before removing it below */
6554 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6556 ice_aqc_opc_update_sw_rules,
6560 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6561 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6562 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6563 tmp_fltr.fwd_id.hw_vsi_id =
6564 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6565 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6566 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6568 /* Update the previous switch rule of "MAC forward to VSI" to
6569 * "MAC fwd to VSI list"
6571 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6573 ice_debug(hw, ICE_DBG_SW,
6574 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6575 tmp_fltr.fwd_id.hw_vsi_id, status);
6579 /* Remove the VSI list since it is no longer used */
6580 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6582 ice_debug(hw, ICE_DBG_SW,
6583 "Failed to remove VSI list %d, error %d\n",
6584 vsi_list_id, status);
/* Drop the book-keeping map for the deleted VSI list. */
6588 LIST_DEL(&vsi_list_info->list_entry);
6589 ice_free(hw, vsi_list_info);
6590 fm_list->vsi_list_info = NULL;
6597 * ice_rem_adv_rule - removes existing advanced switch rule
6598 * @hw: pointer to the hardware structure
6599 * @lkups: information on the words that needs to be looked up. All words
6600 * together makes one recipe
6601 * @lkups_cnt: num of entries in the lkups array
6602 * @rinfo: Its the pointer to the rule information for the rule
6604 * This function can be used to remove 1 rule at a time. The lkups is
6605 * used to describe all the words that forms the "lookup" portion of the
6606 * rule. These words can span multiple protocols. Callers to this function
6607 * need to pass in a list of protocol headers with lookup information along
6608 * and mask that determines which words are valid from the given protocol
6609 * header. rinfo describes other information related to this rule such as
6610 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): decimated listing — the return-type line, opening brace,
 * some condition lines and the final return are not visible. Visible
 * flow: rebuild the lookup extraction, locate the matching recipe and
 * rule entry, detach the VSI (possibly via VSI-list book keeping), and
 * when no subscriber remains, remove the HW rule and free the entry.
 */
6613 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6614 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6616 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6617 struct ice_prot_lkup_ext lkup_exts;
6618 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6619 enum ice_status status = ICE_SUCCESS;
6620 bool remove_rule = false;
6621 u16 i, rid, vsi_handle;
6623 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6624 for (i = 0; i < lkups_cnt; i++) {
6627 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6630 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6635 /* Create any special protocol/offset pairs, such as looking at tunnel
6636 * bits by extracting metadata
6638 status = ice_add_special_words(rinfo, &lkup_exts);
6642 rid = ice_find_recp(hw, &lkup_exts);
6643 /* If did not find a recipe that match the existing criteria */
6644 if (rid == ICE_MAX_NUM_RECIPES)
6645 return ICE_ERR_PARAM;
6647 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6648 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6649 /* the rule is already removed */
/* Rule-list mutations below are serialized by the recipe's lock. */
6652 ice_acquire_lock(rule_lock);
6653 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6655 } else if (list_elem->vsi_count > 1) {
6656 list_elem->vsi_list_info->ref_cnt--;
6657 remove_rule = false;
6658 vsi_handle = rinfo->sw_act.vsi_handle;
6659 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6661 vsi_handle = rinfo->sw_act.vsi_handle;
6662 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6664 ice_release_lock(rule_lock);
6667 if (list_elem->vsi_count == 0)
6670 ice_release_lock(rule_lock);
6672 struct ice_aqc_sw_rules_elem *s_rule;
6675 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6677 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6680 return ICE_ERR_NO_MEMORY;
/* Removal only needs the rule index — act/hdr_len are zeroed. */
6681 s_rule->pdata.lkup_tx_rx.act = 0;
6682 s_rule->pdata.lkup_tx_rx.index =
6683 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6684 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6685 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6687 ice_aqc_opc_remove_sw_rules, NULL);
6688 if (status == ICE_SUCCESS) {
6689 ice_acquire_lock(rule_lock);
6690 LIST_DEL(&list_elem->list_entry);
6691 ice_free(hw, list_elem->lkups);
6692 ice_free(hw, list_elem);
6693 ice_release_lock(rule_lock);
6695 ice_free(hw, s_rule);
6701 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6702 * @hw: pointer to the hardware structure
6703 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6705 * This function is used to remove 1 rule at a time. The removal is based on
6706 * the remove_entry parameter. This function will remove rule for a given
6707 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* NOTE(review): decimated listing — the return-type line, opening brace
 * and the list-iteration macro's member argument are not visible.
 */
6710 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6711 struct ice_rule_query_data *remove_entry)
6713 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6714 struct LIST_HEAD_TYPE *list_head;
6715 struct ice_adv_rule_info rinfo;
6716 struct ice_switch_info *sw;
6718 sw = hw->switch_info;
/* The recipe must exist before its rule list can be searched. */
6719 if (!sw->recp_list[remove_entry->rid].recp_created)
6720 return ICE_ERR_PARAM;
6721 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6722 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6724 if (list_itr->rule_info.fltr_rule_id ==
6725 remove_entry->rule_id) {
/* Copy the stored rule info but target the caller's VSI,
 * then delegate the actual removal.
 */
6726 rinfo = list_itr->rule_info;
6727 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6728 return ice_rem_adv_rule(hw, list_itr->lkups,
6729 list_itr->lkups_cnt, &rinfo);
/* Rule ID not found under the given recipe. */
6732 return ICE_ERR_PARAM;
6736 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6738 * @hw: pointer to the hardware structure
6739 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6741 * This function is used to remove all the rules for a given VSI and as soon
6742 * as removing a rule fails, it will return immediately with the error code,
6743 * else it will return ICE_SUCCESS
/* NOTE(review): decimated listing — the return-type line, opening brace,
 * rid declaration, "continue" statements, the map_info NULL check, the
 * error return after a failed removal and the final return are not
 * visible. NOTE(review): the comment above says "ice_rem_adv_for_vsi"
 * but the function is named ice_rem_adv_rule_for_vsi — likely a stale
 * doc header; confirm against the base driver before correcting.
 */
6746 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6748 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6749 struct ice_vsi_list_map_info *map_info;
6750 struct LIST_HEAD_TYPE *list_head;
6751 struct ice_adv_rule_info rinfo;
6752 struct ice_switch_info *sw;
6753 enum ice_status status;
6754 u16 vsi_list_id = 0;
6757 sw = hw->switch_info;
/* Walk every recipe that holds advanced rules... */
6758 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6759 if (!sw->recp_list[rid].recp_created)
6761 if (!sw->recp_list[rid].adv_rule)
6763 list_head = &sw->recp_list[rid].filt_rules;
6765 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6766 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* ...and remove each rule whose VSI list contains this VSI. */
6767 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6771 rinfo = list_itr->rule_info;
6772 rinfo.sw_act.vsi_handle = vsi_handle;
6773 status = ice_rem_adv_rule(hw, list_itr->lkups,
6774 list_itr->lkups_cnt, &rinfo);
6784 * ice_replay_fltr - Replay all the filters stored by a specific list head
6785 * @hw: pointer to the hardware structure
6786 * @list_head: list for which filters needs to be replayed
6787 * @recp_id: Recipe ID for which rules need to be replayed
6789 static enum ice_status
6790 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6792 struct ice_fltr_mgmt_list_entry *itr;
6793 enum ice_status status = ICE_SUCCESS;
6794 struct ice_sw_recipe *recp_list;
6795 u8 lport = hw->port_info->lport;
6796 struct LIST_HEAD_TYPE l_head;
6798 if (LIST_EMPTY(list_head))
6801 recp_list = &hw->switch_info->recp_list[recp_id];
6802 /* Move entries from the given list_head to a temporary l_head so that
6803 * they can be replayed. Otherwise when trying to re-add the same
6804 * filter, the function will return already exists
6806 LIST_REPLACE_INIT(list_head, &l_head);
6808 /* Mark the given list_head empty by reinitializing it so filters
6809 * could be added again by *handler
6811 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6813 struct ice_fltr_list_entry f_entry;
6815 f_entry.fltr_info = itr->fltr_info;
6816 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6817 status = ice_add_rule_internal(hw, recp_list, lport,
6819 if (status != ICE_SUCCESS)
6824 /* Add a filter per VSI separately */
6829 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6831 if (!ice_is_vsi_valid(hw, vsi_handle))
6834 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6835 f_entry.fltr_info.vsi_handle = vsi_handle;
6836 f_entry.fltr_info.fwd_id.hw_vsi_id =
6837 ice_get_hw_vsi_num(hw, vsi_handle);
6838 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6839 if (recp_id == ICE_SW_LKUP_VLAN)
6840 status = ice_add_vlan_internal(hw, &f_entry);
6842 status = ice_add_rule_internal(hw, recp_list,
6845 if (status != ICE_SUCCESS)
6850 /* Clear the filter management list */
6851 ice_rem_sw_rule_info(hw, &l_head);
6856 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6857 * @hw: pointer to the hardware structure
6859 * NOTE: This function does not clean up partially added filters on error.
6860 * It is up to caller of the function to issue a reset or fail early.
6862 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6864 struct ice_switch_info *sw = hw->switch_info;
6865 enum ice_status status = ICE_SUCCESS;
6868 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6869 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6871 status = ice_replay_fltr(hw, i, head);
6872 if (status != ICE_SUCCESS)
6879 * ice_replay_vsi_fltr - Replay filters for requested VSI
6880 * @hw: pointer to the hardware structure
6881 * @vsi_handle: driver VSI handle
6882 * @recp_id: Recipe ID for which rules need to be replayed
6883 * @list_head: list for which filters need to be replayed
6885 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6886 * It is required to pass valid VSI handle.
6888 static enum ice_status
6889 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6890 struct LIST_HEAD_TYPE *list_head)
6892 struct ice_fltr_mgmt_list_entry *itr;
6893 enum ice_status status = ICE_SUCCESS;
6894 struct ice_sw_recipe *recp_list;
6897 if (LIST_EMPTY(list_head))
6899 recp_list = &hw->switch_info->recp_list[recp_id];
6900 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6902 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6904 struct ice_fltr_list_entry f_entry;
6906 f_entry.fltr_info = itr->fltr_info;
6907 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6908 itr->fltr_info.vsi_handle == vsi_handle) {
6909 /* update the src in case it is VSI num */
6910 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6911 f_entry.fltr_info.src = hw_vsi_id;
6912 status = ice_add_rule_internal(hw, recp_list,
6913 hw->port_info->lport,
6915 if (status != ICE_SUCCESS)
6919 if (!itr->vsi_list_info ||
6920 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6922 /* Clearing it so that the logic can add it back */
6923 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6924 f_entry.fltr_info.vsi_handle = vsi_handle;
6925 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6926 /* update the src in case it is VSI num */
6927 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6928 f_entry.fltr_info.src = hw_vsi_id;
6929 if (recp_id == ICE_SW_LKUP_VLAN)
6930 status = ice_add_vlan_internal(hw, &f_entry);
6932 status = ice_add_rule_internal(hw, recp_list,
6933 hw->port_info->lport,
6935 if (status != ICE_SUCCESS)
6943 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6944 * @hw: pointer to the hardware structure
6945 * @vsi_handle: driver VSI handle
6946 * @list_head: list for which filters need to be replayed
6948 * Replay the advanced rule for the given VSI.
6950 static enum ice_status
6951 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6952 struct LIST_HEAD_TYPE *list_head)
6954 struct ice_rule_query_data added_entry = { 0 };
6955 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6956 enum ice_status status = ICE_SUCCESS;
6958 if (LIST_EMPTY(list_head))
6960 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6962 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6963 u16 lk_cnt = adv_fltr->lkups_cnt;
6965 if (vsi_handle != rinfo->sw_act.vsi_handle)
6967 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6976 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6977 * @hw: pointer to the hardware structure
6978 * @vsi_handle: driver VSI handle
6980 * Replays filters for requested VSI via vsi_handle.
6982 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6984 struct ice_switch_info *sw = hw->switch_info;
6985 enum ice_status status;
6988 /* Update the recipes that were created */
6989 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6990 struct LIST_HEAD_TYPE *head;
6992 head = &sw->recp_list[i].filt_replay_rules;
6993 if (!sw->recp_list[i].adv_rule)
6994 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6996 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6997 if (status != ICE_SUCCESS)
7005 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7006 * @hw: pointer to the HW struct
7008 * Deletes the filter replay rules.
7010 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7012 struct ice_switch_info *sw = hw->switch_info;
7018 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7019 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7020 struct LIST_HEAD_TYPE *l_head;
7022 l_head = &sw->recp_list[i].filt_replay_rules;
7023 if (!sw->recp_list[i].adv_rule)
7024 ice_rem_sw_rule_info(hw, l_head);
7026 ice_rem_adv_rule_info(hw, l_head);