1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of the DA, Ethertype and VLAN TCI fields within the dummy
 * Ethernet header defined below; ICE_MAX_VLAN_ID is the largest valid
 * 12-bit VLAN ID (0xFFF).
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* 16-byte dummy L2 header (DA + SA + ethertype/VLAN placeholder) used to
 * seed switch filter rules; byte 0 = 0x2 marks a locally administered DA.
 * NOTE(review): only the first 6 initializer bytes are visible in this
 * chunk — the remaining elements of the array appear to be missing here.
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Buffer sizes (bytes) for the variable-length switch rule AQ commands:
 * each is the fixed ice_aqc_sw_rules_elem header minus its flexible
 * 'pdata' member, plus the specific payload struct. The LG_ACT/VSI_LIST
 * variants additionally scale by 'n' act/vsi entries.
 * NOTE(review): the trailing "- 1" presumably compensates for a one-byte
 * flexible array member inside the payload struct — confirm against
 * ice_adminq_cmd.h.
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* One (protocol, byte-offset) pair describing where a header starts inside
 * a dummy training packet; each offsets table below is terminated by an
 * entry whose type is ICE_PROTOCOL_LAST.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet.
 * NOTE(review): several entries of this offsets table are missing from this
 * chunk; only the outer IPv4 entry and the terminator are visible.
 */
60 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
63 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
/* Dummy NVGRE-encapsulated TCP packet: outer IPv4 protocol 0x2F = GRE,
 * inner IPv4 protocol 0x06 = TCP.
 */
71 static const u8 dummy_gre_tcp_packet[] = {
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
76 0x08, 0x00, /* ICE_ETYPE_OL 12 */
78 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x2F, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
84 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
85 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
88 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00,
92 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
93 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x06, 0x00, 0x00,
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x50, 0x02, 0x20, 0x00,
102 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
105 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
107 { ICE_ETYPE_OL, 12 },
108 { ICE_IPV4_OFOS, 14 },
112 { ICE_UDP_ILOS, 76 },
113 { ICE_PROTOCOL_LAST, 0 },
/* Dummy NVGRE-encapsulated UDP packet: outer IPv4 protocol 0x2F = GRE,
 * inner IPv4 protocol 0x11 = UDP.
 */
116 static const u8 dummy_gre_udp_packet[] = {
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
121 0x08, 0x00, /* ICE_ETYPE_OL 12 */
123 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x2F, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
129 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
130 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
137 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x11, 0x00, 0x00,
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
144 0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP + VXLAN/VXLAN-GPE + inner MAC + IPv4 +
 * TCP dummy packet
 */
147 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
154 { ICE_VXLAN_GPE, 42 },
158 { ICE_PROTOCOL_LAST, 0 },
/* Dummy UDP-tunneled (VXLAN, dst port 0x12b5 = 4789) TCP packet */
161 static const u8 dummy_udp_tun_tcp_packet[] = {
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x08, 0x00, /* ICE_ETYPE_OL 12 */
168 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x11, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
175 0x00, 0x46, 0x00, 0x00,
177 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
181 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
185 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
186 0x00, 0x01, 0x00, 0x00,
187 0x40, 0x06, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x50, 0x02, 0x20, 0x00,
195 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP + VXLAN/VXLAN-GPE + inner MAC + IPv4 +
 * UDP dummy packet
 */
198 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
200 { ICE_ETYPE_OL, 12 },
201 { ICE_IPV4_OFOS, 14 },
205 { ICE_VXLAN_GPE, 42 },
208 { ICE_UDP_ILOS, 84 },
209 { ICE_PROTOCOL_LAST, 0 },
/* Dummy UDP-tunneled (VXLAN, dst port 0x12b5 = 4789) UDP packet */
212 static const u8 dummy_udp_tun_udp_packet[] = {
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x08, 0x00, /* ICE_ETYPE_OL 12 */
219 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
226 0x00, 0x3a, 0x00, 0x00,
228 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
236 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
237 0x00, 0x01, 0x00, 0x00,
238 0x00, 0x11, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
243 0x00, 0x08, 0x00, 0x00,
246 /* offset info for MAC + IPv4 + UDP dummy packet */
247 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
249 { ICE_ETYPE_OL, 12 },
250 { ICE_IPV4_OFOS, 14 },
251 { ICE_UDP_ILOS, 34 },
252 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + UDP (IPv4 protocol 0x11 = UDP) */
255 /* Dummy packet for MAC + IPv4 + UDP */
256 static const u8 dummy_udp_packet[] = {
257 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00,
261 0x08, 0x00, /* ICE_ETYPE_OL 12 */
263 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
264 0x00, 0x01, 0x00, 0x00,
265 0x00, 0x11, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
270 0x00, 0x08, 0x00, 0x00,
272 0x00, 0x00, /* 2 bytes for 4 byte alignment */
275 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
276 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
278 { ICE_ETYPE_OL, 12 },
279 { ICE_VLAN_OFOS, 14 },
280 { ICE_IPV4_OFOS, 18 },
281 { ICE_UDP_ILOS, 38 },
282 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q, ethertype 0x8100), IPv4:UDP dummy packet */
286 static const u8 dummy_vlan_udp_packet[] = {
287 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
288 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00,
291 0x81, 0x00, /* ICE_ETYPE_OL 12 */
293 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
295 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
296 0x00, 0x01, 0x00, 0x00,
297 0x00, 0x11, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
302 0x00, 0x08, 0x00, 0x00,
304 0x00, 0x00, /* 2 bytes for 4 byte alignment */
307 /* offset info for MAC + IPv4 + TCP dummy packet */
308 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
310 { ICE_ETYPE_OL, 12 },
311 { ICE_IPV4_OFOS, 14 },
313 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv4 + TCP (IPv4 protocol 0x06 = TCP) */
316 /* Dummy packet for MAC + IPv4 + TCP */
317 static const u8 dummy_tcp_packet[] = {
318 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
319 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00,
322 0x08, 0x00, /* ICE_ETYPE_OL 12 */
324 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
325 0x00, 0x01, 0x00, 0x00,
326 0x00, 0x06, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x50, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, /* 2 bytes for 4 byte alignment */
339 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
340 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_VLAN_OFOS, 14 },
344 { ICE_IPV4_OFOS, 18 },
346 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q, ethertype 0x8100), IPv4:TCP dummy packet */
350 static const u8 dummy_vlan_tcp_packet[] = {
351 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
352 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x81, 0x00, /* ICE_ETYPE_OL 12 */
357 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
359 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
360 0x00, 0x01, 0x00, 0x00,
361 0x00, 0x06, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
368 0x50, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
374 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
376 { ICE_ETYPE_OL, 12 },
377 { ICE_IPV6_OFOS, 14 },
379 { ICE_PROTOCOL_LAST, 0 },
/* IPv6 + TCP dummy packet (ethertype 0x86DD = IPv6) */
382 static const u8 dummy_tcp_ipv6_packet[] = {
383 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
/* NOTE(review): the "40" in the trailing comment below looks like the
 * IPv6 header length, not the byte offset (the offsets table says 14).
 */
389 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
390 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
391 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x50, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, /* 2 bytes for 4 byte alignment */
409 /* C-tag (802.1Q): IPv6 + TCP */
410 static const struct ice_dummy_pkt_offsets
411 dummy_vlan_tcp_ipv6_packet_offsets[] = {
413 { ICE_ETYPE_OL, 12 },
414 { ICE_VLAN_OFOS, 14 },
415 { ICE_IPV6_OFOS, 18 },
417 { ICE_PROTOCOL_LAST, 0 },
420 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
421 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x81, 0x00, /* ICE_ETYPE_OL 12 */
428 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
430 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
431 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x50, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
451 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
453 { ICE_ETYPE_OL, 12 },
454 { ICE_IPV6_OFOS, 14 },
455 { ICE_UDP_ILOS, 54 },
456 { ICE_PROTOCOL_LAST, 0 },
459 /* IPv6 + UDP dummy packet */
460 static const u8 dummy_udp_ipv6_packet[] = {
461 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
462 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00,
465 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
/* NOTE(review): as with dummy_tcp_ipv6_packet, the "40" below appears to
 * be the IPv6 header length rather than the offset (table says 14).
 */
467 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
468 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
469 0x00, 0x00, 0x00, 0x00,
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
479 0x00, 0x08, 0x00, 0x00,
481 0x00, 0x00, /* 2 bytes for 4 byte alignment */
484 /* C-tag (802.1Q): IPv6 + UDP */
485 static const struct ice_dummy_pkt_offsets
486 dummy_vlan_udp_ipv6_packet_offsets[] = {
488 { ICE_ETYPE_OL, 12 },
489 { ICE_VLAN_OFOS, 14 },
490 { ICE_IPV6_OFOS, 18 },
491 { ICE_UDP_ILOS, 58 },
492 { ICE_PROTOCOL_LAST, 0 },
495 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
496 static const u8 dummy_vlan_udp_ipv6_packet[] = {
497 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x81, 0x00, /* ICE_ETYPE_OL 12 */
503 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
505 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
506 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
517 0x00, 0x08, 0x00, 0x00,
519 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
522 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
524 { ICE_IPV4_OFOS, 14 },
527 { ICE_PROTOCOL_LAST, 0 },
/* Dummy GTP-U packet: UDP dst port 0x0868 = 2152 (GTP-U), followed by a
 * GTP header with a PDU Session Container extension (type 0x85).
 */
530 static const u8 dummy_udp_gtp_packet[] = {
531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
532 0x00, 0x00, 0x00, 0x00,
533 0x00, 0x00, 0x00, 0x00,
536 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
537 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x11, 0x00, 0x00,
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
543 0x00, 0x1c, 0x00, 0x00,
545 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
546 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x85,
549 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
550 0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE session dummy packet */
553 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
555 { ICE_ETYPE_OL, 12 },
556 { ICE_VLAN_OFOS, 14},
558 { ICE_PROTOCOL_LAST, 0 },
/* Dummy PPPoE session packet: inner ethertype 0x8864 (PPPoE session),
 * PPP protocol 0x0021 = IPv4.
 */
561 static const u8 dummy_pppoe_packet[] = {
562 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x00, 0x00, 0x00,
566 0x81, 0x00, /* ICE_ETYPE_OL 12 */
568 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
570 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
573 0x00, 0x21, /* PPP Link Layer 24 */
575 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
584 /* this is a recipe to profile association bitmap */
585 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
586 ICE_MAX_NUM_PROFILES);
588 /* this is a profile to recipe association bitmap */
589 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
590 ICE_MAX_NUM_RECIPES);
/* Forward declaration: ice_get_recp_frm_fw() calls this before its
 * definition later in this file.
 */
592 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
595 * ice_collect_result_idx - copy result index values
596 * @buf: buffer that contains the result index
597 * @recp: the recipe struct to copy data into
/* If the recipe entry's result index is valid (RESULT_EN flag set), record
 * the index value — with the enable flag masked off — in the recipe's
 * result-index bitmap.
 */
599 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
600 struct ice_sw_recipe *recp)
602 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
603 ice_set_bit(buf->content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
608 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
609 * @hw: pointer to hardware structure
610 * @recps: struct that we need to populate
611 * @rid: recipe ID that we are populating
612 * @refresh_required: true if we should get recipe to profile mapping from FW
614 * This function is used to populate all the necessary entries into our
615 * bookkeeping so that we have a current list of all the recipes that are
616 * programmed in the firmware.
/* NOTE(review): this chunk is missing interior lines (allocation null
 * checks, error-path gotos and closing braces); the code below is left
 * byte-identical and only comments have been added.
 */
618 static enum ice_status
619 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
620 bool *refresh_required)
622 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
623 struct ice_aqc_recipe_data_elem *tmp;
624 u16 num_recps = ICE_MAX_NUM_RECIPES;
625 struct ice_prot_lkup_ext *lkup_exts;
626 u16 i, sub_recps, fv_word_idx = 0;
627 enum ice_status status;
629 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
631 /* we need a buffer big enough to accommodate all the recipes */
632 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
633 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
635 return ICE_ERR_NO_MEMORY;
637 tmp[0].recipe_indx = rid;
638 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
639 /* non-zero status meaning recipe doesn't exist */
643 /* Get recipe to profile map so that we can get the fv from lkups that
644 * we read for a recipe from FW. Since we want to minimize the number of
645 * times we make this FW call, just make one call and cache the copy
646 * until a new recipe is added. This operation is only required the
647 * first time to get the changes from FW. Then to search existing
648 * entries we don't need to update the cache again until another recipe
651 if (*refresh_required) {
652 ice_get_recp_to_prof_map(hw);
653 *refresh_required = false;
656 /* Start populating all the entries for recps[rid] based on lkups from
657 * firmware. Note that we are only creating the root recipe in our
660 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe element returned by FW for this recipe chain */
662 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
663 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
664 struct ice_recp_grp_entry *rg_entry;
665 u8 prof, idx, prot = 0;
669 rg_entry = (struct ice_recp_grp_entry *)
670 ice_malloc(hw, sizeof(*rg_entry));
672 status = ICE_ERR_NO_MEMORY;
676 idx = root_bufs.recipe_indx;
677 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
679 /* Mark all result indices in this chain */
680 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
681 ice_set_bit(root_bufs.content.result_indx &
682 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
684 /* get the first profile that is associated with rid */
685 prof = ice_find_first_bit(recipe_to_profile[idx],
686 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0] is skipped: entries start at index 1 */
687 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
688 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
690 rg_entry->fv_idx[i] = lkup_indx;
691 rg_entry->fv_mask[i] =
692 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
694 /* If the recipe is a chained recipe then all its
695 * child recipe's result will have a result index.
696 * To fill fv_words we should not use those result
697 * index, we only need the protocol ids and offsets.
698 * We will skip all the fv_idx which stores result
699 * index in them. We also need to skip any fv_idx which
700 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
701 * valid offset value.
703 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
704 rg_entry->fv_idx[i]) ||
705 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
706 rg_entry->fv_idx[i] == 0)
/* Translate the FV index into a (protocol id, offset) pair */
709 ice_find_prot_off(hw, ICE_BLK_SW, prof,
710 rg_entry->fv_idx[i], &prot, &off);
711 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
712 lkup_exts->fv_words[fv_word_idx].off = off;
715 /* populate rg_list with the data from the child entry of this
718 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
720 /* Propagate some data to the recipe database */
721 recps[idx].is_root = is_root;
722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
723 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
724 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
725 recps[idx].chain_idx = root_bufs.content.result_indx &
726 ~ICE_AQ_RECIPE_RESULT_EN;
727 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
729 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
735 /* Only do the following for root recipes entries */
736 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
737 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
738 recps[idx].root_rid = root_bufs.content.rid &
739 ~ICE_AQ_RECIPE_ID_IS_ROOT;
740 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
743 /* Complete initialization of the root recipe entry */
744 lkup_exts->n_val_words = fv_word_idx;
745 recps[rid].big_recp = (num_recps > 1);
746 recps[rid].n_grp_count = (u8)num_recps;
/* Keep a private copy of the raw FW recipe data for later reuse */
747 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
748 ice_memdup(hw, tmp, recps[rid].n_grp_count *
749 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
750 if (!recps[rid].root_buf)
753 /* Copy result indexes */
754 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
755 recps[rid].recp_created = true;
763 * ice_get_recp_to_prof_map - updates recipe to profile mapping
764 * @hw: pointer to hardware structure
766 * This function is used to populate recipe_to_profile matrix where index to
767 * this array is the recipe ID and the element is the mapping of which profiles
768 * is this recipe mapped to.
/* For each profile, fetch its recipe bitmap from FW, cache it in
 * profile_to_recipe[], and set the inverse mapping bit in
 * recipe_to_profile[] for every recipe the profile references.
 * NOTE(review): return type, braces and some declarations are missing
 * from this chunk; code left byte-identical.
 */
771 ice_get_recp_to_prof_map(struct ice_hw *hw)
773 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
776 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
779 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
780 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
781 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
783 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
784 ICE_MAX_NUM_RECIPES);
785 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
786 if (ice_is_bit_set(r_bitmap, j))
787 ice_set_bit(i, recipe_to_profile[j]);
792 * ice_init_def_sw_recp - initialize the recipe book keeping tables
793 * @hw: pointer to the HW struct
794 * @recp_list: pointer to sw recipe list
796 * Allocate memory for the entire recipe table and initialize the structures/
797 * entries corresponding to basic recipes.
/* Allocates one ice_sw_recipe per possible recipe ID and initializes each
 * entry's list heads and per-recipe filter-rule lock.
 */
800 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
802 struct ice_sw_recipe *recps;
805 recps = (struct ice_sw_recipe *)
806 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
808 return ICE_ERR_NO_MEMORY;
810 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
811 recps[i].root_rid = i;
812 INIT_LIST_HEAD(&recps[i].filt_rules);
813 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
814 INIT_LIST_HEAD(&recps[i].rg_list);
815 ice_init_lock(&recps[i].filt_rule_lock);
824 * ice_aq_get_sw_cfg - get switch configuration
825 * @hw: pointer to the hardware structure
826 * @buf: pointer to the result buffer
827 * @buf_size: length of the buffer available for response
828 * @req_desc: pointer to requested descriptor
829 * @num_elems: pointer to number of elements
830 * @cd: pointer to command details structure or NULL
832 * Get switch configuration (0x0200) to be placed in 'buff'.
833 * This admin command returns information such as initial VSI/port number
834 * and switch ID it belongs to.
836 * NOTE: *req_desc is both an input/output parameter.
837 * The caller of this function first calls this function with *request_desc set
838 * to 0. If the response from f/w has *req_desc set to 0, all the switch
839 * configuration information has been returned; if non-zero (meaning not all
840 * the information was returned), the caller should call this function again
841 * with *req_desc set to the previous value returned by f/w to get the
842 * next block of switch configuration information.
844 * *num_elems is output only parameter. This reflects the number of elements
845 * in response buffer. The caller of this function to use *num_elems while
846 * parsing the response buffer.
848 static enum ice_status
849 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
850 u16 buf_size, u16 *req_desc, u16 *num_elems,
851 struct ice_sq_cd *cd)
853 struct ice_aqc_get_sw_cfg *cmd;
854 enum ice_status status;
855 struct ice_aq_desc desc;
857 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
858 cmd = &desc.params.get_sw_conf;
859 cmd->element = CPU_TO_LE16(*req_desc);
861 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes the continuation cookie and element count back in the
 * descriptor; NOTE(review): the success-check guarding these two reads
 * appears to be missing from this chunk.
 */
863 *req_desc = LE16_TO_CPU(cmd->element);
864 *num_elems = LE16_TO_CPU(cmd->num_elems);
871 * ice_alloc_sw - allocate resources specific to switch
872 * @hw: pointer to the HW struct
873 * @ena_stats: true to turn on VEB stats
874 * @shared_res: true for shared resource, false for dedicated resource
875 * @sw_id: switch ID returned
876 * @counter_id: VEB counter ID returned
878 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): several lines (return type, null checks, braces, the
 * counter_id parameter line) are missing from this chunk; code left
 * byte-identical.
 */
881 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
884 struct ice_aqc_alloc_free_res_elem *sw_buf;
885 struct ice_aqc_res_elem *sw_ele;
886 enum ice_status status;
889 buf_len = sizeof(*sw_buf);
890 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
891 ice_malloc(hw, buf_len);
893 return ICE_ERR_NO_MEMORY;
895 /* Prepare buffer for switch ID.
896 * The number of resource entries in buffer is passed as 1 since only a
897 * single switch/VEB instance is allocated, and hence a single sw_id
900 sw_buf->num_elems = CPU_TO_LE16(1);
/* SWID resource type, flagged shared or dedicated per caller's request */
902 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
903 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
904 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
906 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
907 ice_aqc_opc_alloc_res, NULL);
910 goto ice_alloc_sw_exit;
912 sw_ele = &sw_buf->elem[0];
913 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
916 /* Prepare buffer for VEB Counter */
917 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
918 struct ice_aqc_alloc_free_res_elem *counter_buf;
919 struct ice_aqc_res_elem *counter_ele;
921 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
922 ice_malloc(hw, buf_len);
924 status = ICE_ERR_NO_MEMORY;
925 goto ice_alloc_sw_exit;
928 /* The number of resource entries in buffer is passed as 1 since
929 * only a single switch/VEB instance is allocated, and hence a
930 * single VEB counter is requested.
932 counter_buf->num_elems = CPU_TO_LE16(1);
933 counter_buf->res_type =
934 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
935 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
936 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* On AQ failure the counter buffer is released before bailing out */
940 ice_free(hw, counter_buf);
941 goto ice_alloc_sw_exit;
943 counter_ele = &counter_buf->elem[0];
944 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
945 ice_free(hw, counter_buf);
949 ice_free(hw, sw_buf);
954 * ice_free_sw - free resources specific to switch
955 * @hw: pointer to the HW struct
956 * @sw_id: switch ID returned
957 * @counter_id: VEB counter ID returned
959 * free switch resources (SWID and VEB counter) (0x0209)
961 * NOTE: This function frees multiple resources. It continues
962 * releasing other resources even after it encounters error.
963 * The error code returned is the last error it encountered.
965 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
967 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
968 enum ice_status status, ret_status;
971 buf_len = sizeof(*sw_buf);
972 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
973 ice_malloc(hw, buf_len);
975 return ICE_ERR_NO_MEMORY;
977 /* Prepare buffer to free for switch ID res.
978 * The number of resource entries in buffer is passed as 1 since only a
979 * single switch/VEB instance is freed, and hence a single sw_id
982 sw_buf->num_elems = CPU_TO_LE16(1);
983 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
984 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Keep the first error but continue freeing the remaining resources */
986 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
987 ice_aqc_opc_free_res, NULL);
990 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
992 /* Prepare buffer to free for VEB Counter resource */
993 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
994 ice_malloc(hw, buf_len);
996 ice_free(hw, sw_buf);
997 return ICE_ERR_NO_MEMORY;
1000 /* The number of resource entries in buffer is passed as 1 since only a
1001 * single switch/VEB instance is freed, and hence a single VEB counter
1004 counter_buf->num_elems = CPU_TO_LE16(1);
1005 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1006 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1008 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1009 ice_aqc_opc_free_res, NULL);
1011 ice_debug(hw, ICE_DBG_SW,
1012 "VEB counter resource could not be freed\n");
1013 ret_status = status;
1016 ice_free(hw, counter_buf);
1017 ice_free(hw, sw_buf);
/* ice_aq_add_vsi - add a VSI context to the hardware (0x0210) */
1023 * @hw: pointer to the HW struct
1024 * @vsi_ctx: pointer to a VSI context struct
1025 * @cd: pointer to command details structure or NULL
1027 * Add a VSI context to the hardware (0x0210)
1030 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1031 struct ice_sq_cd *cd)
1033 struct ice_aqc_add_update_free_vsi_resp *res;
1034 struct ice_aqc_add_get_update_free_vsi *cmd;
1035 struct ice_aq_desc desc;
1036 enum ice_status status;
1038 cmd = &desc.params.vsi_cmd;
1039 res = &desc.params.add_update_free_vsi_res;
1041 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* Caller-specified VSI number is only sent when not allocating from pool */
1043 if (!vsi_ctx->alloc_from_pool)
1044 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1045 ICE_AQ_VSI_IS_VALID);
1047 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: command carries a read buffer (the VSI info struct) */
1049 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1051 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1052 sizeof(vsi_ctx->info), cd);
/* On success FW returns the assigned VSI number and pool statistics */
1055 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1056 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1057 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi - free a VSI context from the hardware (0x0213) */
1065 * @hw: pointer to the HW struct
1066 * @vsi_ctx: pointer to a VSI context struct
1067 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1068 * @cd: pointer to command details structure or NULL
1070 * Free VSI context info from hardware (0x0213)
1073 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1074 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1076 struct ice_aqc_add_update_free_vsi_resp *resp;
1077 struct ice_aqc_add_get_update_free_vsi *cmd;
1078 struct ice_aq_desc desc;
1079 enum ice_status status;
1081 cmd = &desc.params.vsi_cmd;
1082 resp = &desc.params.add_update_free_vsi_res;
1084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1086 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* NOTE(review): the 'if (keep_vsi_alloc)' guard for this flag appears to
 * be missing from this chunk.
 */
1088 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1090 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1092 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1093 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi - update a VSI context in the hardware (0x0211) */
1101 * @hw: pointer to the HW struct
1102 * @vsi_ctx: pointer to a VSI context struct
1103 * @cd: pointer to command details structure or NULL
1105 * Update VSI context in the hardware (0x0211)
1108 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1109 struct ice_sq_cd *cd)
1111 struct ice_aqc_add_update_free_vsi_resp *resp;
1112 struct ice_aqc_add_get_update_free_vsi *cmd;
1113 struct ice_aq_desc desc;
1114 enum ice_status status;
1116 cmd = &desc.params.vsi_cmd;
1117 resp = &desc.params.add_update_free_vsi_res;
1119 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1121 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: command carries a read buffer (the VSI info struct) */
1123 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1125 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1126 sizeof(vsi_ctx->info), cd);
1129 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1130 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1137 * ice_is_vsi_valid - check whether the VSI is valid or not
1138 * @hw: pointer to the HW struct
1139 * @vsi_handle: VSI handle
1141 * check whether the VSI is valid or not
/* A handle is valid when it is in range and a context has been saved */
1143 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1145 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1149 * ice_get_hw_vsi_num - return the HW VSI number
1150 * @hw: pointer to the HW struct
1151 * @vsi_handle: VSI handle
1153 * return the HW VSI number
1154 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
/* No bounds/null check here by design — see the caution above */
1156 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1158 return hw->vsi_ctx[vsi_handle]->vsi_num;
1162 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1163 * @hw: pointer to the HW struct
1164 * @vsi_handle: VSI handle
1166 * return the VSI context entry for a given VSI handle
1168 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* range-checked lookup; NULL for out-of-range handles (and may also be
 * NULL for in-range handles that were never saved)
 */
1170 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1174 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1175 * @hw: pointer to the HW struct
1176 * @vsi_handle: VSI handle
1177 * @vsi: VSI context pointer
1179 * save the VSI context entry for a given VSI handle
1182 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* no bounds check here — callers (e.g. ice_add_vsi) validate the handle */
1184 hw->vsi_ctx[vsi_handle] = vsi;
1188 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1189 * @hw: pointer to the HW struct
1190 * @vsi_handle: VSI handle
1192 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1194 struct ice_vsi_ctx *vsi;
1197 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* NOTE(review): ice_get_vsi_ctx can return NULL; the early-return guard
 * appears to be on a line missing from this extract — confirm.
 */
/* free the per-TC LAN queue context arrays and clear the pointers */
1200 ice_for_each_traffic_class(i) {
1201 if (vsi->lan_q_ctx[i]) {
1202 ice_free(hw, vsi->lan_q_ctx[i]);
1203 vsi->lan_q_ctx[i] = NULL;
1209 * ice_clear_vsi_ctx - clear the VSI context entry
1210 * @hw: pointer to the HW struct
1211 * @vsi_handle: VSI handle
1213 * clear the VSI context entry
1215 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1217 struct ice_vsi_ctx *vsi;
1219 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* release queue contexts first, then (presumably after freeing the ctx
 * itself on a line not visible here) drop the handle mapping
 */
1221 ice_clear_vsi_q_ctx(hw, vsi_handle);
1223 hw->vsi_ctx[vsi_handle] = NULL;
1228 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1229 * @hw: pointer to the HW struct
1231 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* iterate the full handle space; ice_clear_vsi_ctx handles unused slots */
1235 for (i = 0; i < ICE_MAX_VSI; i++)
1236 ice_clear_vsi_ctx(hw, i);
1240 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1241 * @hw: pointer to the HW struct
1242 * @vsi_handle: unique VSI handle provided by drivers
1243 * @vsi_ctx: pointer to a VSI context struct
1244 * @cd: pointer to command details structure or NULL
1246 * Add a VSI context to the hardware also add it into the VSI handle list.
1247 * If this function gets called after reset for existing VSIs then update
1248 * with the new HW VSI number in the corresponding VSI handle list entry.
1251 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1252 struct ice_sq_cd *cd)
1254 struct ice_vsi_ctx *tmp_vsi_ctx;
1255 enum ice_status status;
1257 if (vsi_handle >= ICE_MAX_VSI)
1258 return ICE_ERR_PARAM;
1259 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1262 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1264 /* Create a new VSI context */
1265 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1266 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* on allocation failure, undo the HW add so FW state stays consistent.
 * NOTE(review): the "if (!tmp_vsi_ctx)" guard line is not visible in this
 * extract — confirm against the full source.
 */
1268 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1269 return ICE_ERR_NO_MEMORY;
1271 *tmp_vsi_ctx = *vsi_ctx;
1273 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1275 /* update with new HW VSI num */
1276 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1283 * ice_free_vsi- free VSI context from hardware and VSI handle list
1284 * @hw: pointer to the HW struct
1285 * @vsi_handle: unique VSI handle
1286 * @vsi_ctx: pointer to a VSI context struct
1287 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1288 * @cd: pointer to command details structure or NULL
1290 * Free VSI context info from hardware as well as from VSI handle list
1293 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1294 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1296 enum ice_status status;
1298 if (!ice_is_vsi_valid(hw, vsi_handle))
1299 return ICE_ERR_PARAM;
/* resolve the handle to the actual HW VSI number before the AQ call */
1300 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1301 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* NOTE(review): local ctx is presumably cleared only when the AQ free
 * succeeded; the guarding "if (!status)" line is not visible here.
 */
1303 ice_clear_vsi_ctx(hw, vsi_handle);
1309 * @hw: pointer to the HW struct
1310 * @vsi_handle: unique VSI handle
1311 * @vsi_ctx: pointer to a VSI context struct
1312 * @cd: pointer to command details structure or NULL
1314 * Update VSI context in the hardware
1317 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1318 struct ice_sq_cd *cd)
1320 if (!ice_is_vsi_valid(hw, vsi_handle))
1321 return ICE_ERR_PARAM;
/* translate driver handle to HW VSI number, then issue the AQ update */
1322 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1323 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1327 * ice_aq_get_vsi_params
1328 * @hw: pointer to the HW struct
1329 * @vsi_ctx: pointer to a VSI context struct
1330 * @cd: pointer to command details structure or NULL
1332 * Get VSI context info from hardware (0x0212)
1335 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1336 struct ice_sq_cd *cd)
1338 struct ice_aqc_add_get_update_free_vsi *cmd;
1339 struct ice_aqc_get_vsi_resp *resp;
1340 struct ice_aq_desc desc;
1341 enum ice_status status;
1343 cmd = &desc.params.vsi_cmd;
1344 resp = &desc.params.get_vsi_resp;
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1348 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* vsi_ctx->info acts as the response buffer for the VSI context data */
1350 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1351 sizeof(vsi_ctx->info), cd);
/* NOTE(review): the mask operand of the "&" on the next line (and the
 * success guard) fall on lines missing from this extract — confirm.
 */
1353 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1355 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1356 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1363 * ice_aq_add_update_mir_rule - add/update a mirror rule
1364 * @hw: pointer to the HW struct
1365 * @rule_type: Rule Type
1366 * @dest_vsi: VSI number to which packets will be mirrored
1367 * @count: length of the list
1368 * @mr_buf: buffer for list of mirrored VSI numbers
1369 * @cd: pointer to command details structure or NULL
1372 * Add/Update Mirror Rule (0x260).
1375 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1376 u16 count, struct ice_mir_rule_buf *mr_buf,
1377 struct ice_sq_cd *cd, u16 *rule_id)
1379 struct ice_aqc_add_update_mir_rule *cmd;
1380 struct ice_aq_desc desc;
1381 enum ice_status status;
1382 __le16 *mr_list = NULL;
/* virtual-port rules need a VSI list buffer; physical-port rules must not
 * supply one
 */
1385 switch (rule_type) {
1386 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1387 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1388 /* Make sure count and mr_buf are set for these rule_types */
1389 if (!(count && mr_buf))
1390 return ICE_ERR_PARAM;
1392 buf_size = count * sizeof(__le16);
1393 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1395 return ICE_ERR_NO_MEMORY;
1397 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1398 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1399 /* Make sure count and mr_buf are not set for these
1402 if (count || mr_buf)
1403 return ICE_ERR_PARAM;
1406 ice_debug(hw, ICE_DBG_SW,
1407 "Error due to unsupported rule_type %u\n", rule_type);
1408 return ICE_ERR_OUT_OF_RANGE;
1411 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1413 /* Pre-process 'mr_buf' items for add/update of virtual port
1414 * ingress/egress mirroring (but not physical port ingress/egress
1420 for (i = 0; i < count; i++) {
1423 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1425 /* Validate specified VSI number, make sure it is less
1426 * than ICE_MAX_VSI, if not return with error.
1428 if (id >= ICE_MAX_VSI) {
1429 ice_debug(hw, ICE_DBG_SW,
1430 "Error VSI index (%u) out-of-range\n",
1432 ice_free(hw, mr_list);
1433 return ICE_ERR_OUT_OF_RANGE;
1436 /* add VSI to mirror rule */
1439 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1440 else /* remove VSI from mirror rule */
1441 mr_list[i] = CPU_TO_LE16(id);
1445 cmd = &desc.params.add_update_rule;
/* reuse an existing rule ID if the caller passed a valid one (update) */
1446 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1447 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1448 ICE_AQC_RULE_ID_VALID_M);
1449 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1450 cmd->num_entries = CPU_TO_LE16(count);
1451 cmd->dest = CPU_TO_LE16(dest_vsi);
1453 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* FW echoes the (possibly newly allocated) rule ID back in the descriptor */
1455 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1457 ice_free(hw, mr_list);
1463 * ice_aq_delete_mir_rule - delete a mirror rule
1464 * @hw: pointer to the HW struct
1465 * @rule_id: Mirror rule ID (to be deleted)
1466 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1467 * otherwise it is returned to the shared pool
1468 * @cd: pointer to command details structure or NULL
1470 * Delete Mirror Rule (0x261).
1473 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1474 struct ice_sq_cd *cd)
1476 struct ice_aqc_delete_mir_rule *cmd;
1477 struct ice_aq_desc desc;
1479 /* rule_id should be in the range 0...63 */
1480 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1481 return ICE_ERR_OUT_OF_RANGE;
1483 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1485 cmd = &desc.params.del_rule;
1486 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1487 cmd->rule_id = CPU_TO_LE16(rule_id);
/* NOTE(review): the flag below is presumably set only when keep_allocd is
 * true; the "if (keep_allocd)" line is not visible in this extract.
 */
1490 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1492 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1496 * ice_aq_alloc_free_vsi_list
1497 * @hw: pointer to the HW struct
1498 * @vsi_list_id: VSI list ID returned or used for lookup
1499 * @lkup_type: switch rule filter lookup type
1500 * @opc: switch rules population command type - pass in the command opcode
1502 * allocates or free a VSI list resource
1504 static enum ice_status
1505 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1506 enum ice_sw_lkup_type lkup_type,
1507 enum ice_adminq_opc opc)
1509 struct ice_aqc_alloc_free_res_elem *sw_buf;
1510 struct ice_aqc_res_elem *vsi_ele;
1511 enum ice_status status;
1514 buf_len = sizeof(*sw_buf);
1515 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1516 ice_malloc(hw, buf_len);
1518 return ICE_ERR_NO_MEMORY;
1519 sw_buf->num_elems = CPU_TO_LE16(1);
/* resource type depends on the lookup: VLAN rules use the pruning list,
 * everything else uses the replication list
 */
1521 if (lkup_type == ICE_SW_LKUP_MAC ||
1522 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1523 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1524 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1525 lkup_type == ICE_SW_LKUP_PROMISC ||
1526 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1527 lkup_type == ICE_SW_LKUP_LAST) {
1528 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1529 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1531 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1533 status = ICE_ERR_PARAM;
1534 goto ice_aq_alloc_free_vsi_list_exit;
/* on free, tell FW which list ID to release */
1537 if (opc == ice_aqc_opc_free_res)
1538 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1540 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1542 goto ice_aq_alloc_free_vsi_list_exit;
/* on alloc, return the list ID FW assigned */
1544 if (opc == ice_aqc_opc_alloc_res) {
1545 vsi_ele = &sw_buf->elem[0];
1546 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1549 ice_aq_alloc_free_vsi_list_exit:
1550 ice_free(hw, sw_buf);
1555 * ice_aq_set_storm_ctrl - Sets storm control configuration
1556 * @hw: pointer to the HW struct
1557 * @bcast_thresh: represents the upper threshold for broadcast storm control
1558 * @mcast_thresh: represents the upper threshold for multicast storm control
1559 * @ctl_bitmask: storm control control knobs
1561 * Sets the storm control configuration (0x0280)
1564 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1567 struct ice_aqc_storm_cfg *cmd;
1568 struct ice_aq_desc desc;
1570 cmd = &desc.params.storm_conf;
1572 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* thresholds are masked to the valid field width before programming */
1574 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1575 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1576 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1578 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1582 * ice_aq_get_storm_ctrl - gets storm control configuration
1583 * @hw: pointer to the HW struct
1584 * @bcast_thresh: represents the upper threshold for broadcast storm control
1585 * @mcast_thresh: represents the upper threshold for multicast storm control
1586 * @ctl_bitmask: storm control control knobs
1588 * Gets the storm control configuration (0x0281)
1591 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1594 enum ice_status status;
1595 struct ice_aq_desc desc;
1597 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1599 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* on success, unpack the echoed configuration from the descriptor.
 * NOTE(review): the mask operands of the "&" expressions below fall on
 * lines missing from this extract; out-params are presumably also
 * NULL-checked there — confirm.
 */
1601 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1604 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1607 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1610 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1617 * ice_aq_sw_rules - add/update/remove switch rules
1618 * @hw: pointer to the HW struct
1619 * @rule_list: pointer to switch rule population list
1620 * @rule_list_sz: total size of the rule list in bytes
1621 * @num_rules: number of switch rules in the rule_list
1622 * @opc: switch rules population command type - pass in the command opcode
1623 * @cd: pointer to command details structure or NULL
1625 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1627 static enum ice_status
1628 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1629 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1631 struct ice_aq_desc desc;
1633 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* only the three switch-rule opcodes are accepted */
1635 if (opc != ice_aqc_opc_add_sw_rules &&
1636 opc != ice_aqc_opc_update_sw_rules &&
1637 opc != ice_aqc_opc_remove_sw_rules)
1638 return ICE_ERR_PARAM;
1640 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: rule_list is a host-to-device buffer */
1642 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1643 desc.params.sw_rules.num_rules_fltr_entry_index =
1644 CPU_TO_LE16(num_rules);
1645 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1649 * ice_aq_add_recipe - add switch recipe
1650 * @hw: pointer to the HW struct
1651 * @s_recipe_list: pointer to switch rule population list
1652 * @num_recipes: number of switch recipes in the list
1653 * @cd: pointer to command details structure or NULL
1658 ice_aq_add_recipe(struct ice_hw *hw,
1659 struct ice_aqc_recipe_data_elem *s_recipe_list,
1660 u16 num_recipes, struct ice_sq_cd *cd)
1662 struct ice_aqc_add_get_recipe *cmd;
1663 struct ice_aq_desc desc;
1666 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1667 cmd = &desc.params.add_get_recipe;
1668 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1670 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: the recipe list is sent to FW as a write buffer */
1671 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1673 buf_size = num_recipes * sizeof(*s_recipe_list);
1675 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1679 * ice_aq_get_recipe - get switch recipe
1680 * @hw: pointer to the HW struct
1681 * @s_recipe_list: pointer to switch rule population list
1682 * @num_recipes: pointer to the number of recipes (input and output)
1683 * @recipe_root: root recipe number of recipe(s) to retrieve
1684 * @cd: pointer to command details structure or NULL
1688 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1689 * On output, *num_recipes will equal the number of entries returned in
1692 * The caller must supply enough space in s_recipe_list to hold all possible
1693 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1696 ice_aq_get_recipe(struct ice_hw *hw,
1697 struct ice_aqc_recipe_data_elem *s_recipe_list,
1698 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1700 struct ice_aqc_add_get_recipe *cmd;
1701 struct ice_aq_desc desc;
1702 enum ice_status status;
/* enforce the documented contract: buffer must hold all possible recipes */
1705 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1706 return ICE_ERR_PARAM;
1708 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1709 cmd = &desc.params.add_get_recipe;
1710 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1712 cmd->return_index = CPU_TO_LE16(recipe_root);
1713 cmd->num_sub_recipes = 0;
1715 buf_size = *num_recipes * sizeof(*s_recipe_list);
1717 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1718 /* cppcheck-suppress constArgument */
/* FW writes the actual count back into the descriptor */
1719 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1725 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1726 * @hw: pointer to the HW struct
1727 * @profile_id: package profile ID to associate the recipe with
1728 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1729 * @cd: pointer to command details structure or NULL
1730 * Recipe to profile association (0x0291)
1733 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1734 struct ice_sq_cd *cd)
1736 struct ice_aqc_recipe_to_profile *cmd;
1737 struct ice_aq_desc desc;
1739 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1740 cmd = &desc.params.recipe_to_profile;
1741 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* NOTE(review): profile_id is u32 but the command field is 16-bit; the
 * implicit truncation assumes profile IDs fit in 16 bits — confirm.
 */
1742 cmd->profile_id = CPU_TO_LE16(profile_id);
1743 /* Set the recipe ID bit in the bitmask to let the device know which
1744 * profile we are associating the recipe to
1746 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1747 ICE_NONDMA_TO_NONDMA);
1749 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1753 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1754 * @hw: pointer to the HW struct
1755 * @profile_id: package profile ID to associate the recipe with
1756 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1757 * @cd: pointer to command details structure or NULL
1758 * Associate profile ID with given recipe (0x0293)
1761 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1762 struct ice_sq_cd *cd)
1764 struct ice_aqc_recipe_to_profile *cmd;
1765 struct ice_aq_desc desc;
1766 enum ice_status status;
1768 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1769 cmd = &desc.params.recipe_to_profile;
1770 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1771 cmd->profile_id = CPU_TO_LE16(profile_id);
1773 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* copy FW's association bitmap out to the caller.
 * NOTE(review): presumably guarded by "if (!status)" on a line not
 * visible in this extract — confirm.
 */
1775 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1776 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1782 * ice_alloc_recipe - add recipe resource
1783 * @hw: pointer to the hardware structure
1784 * @rid: recipe ID returned as response to AQ call
1786 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1788 struct ice_aqc_alloc_free_res_elem *sw_buf;
1789 enum ice_status status;
1792 buf_len = sizeof(*sw_buf);
1793 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1795 return ICE_ERR_NO_MEMORY;
/* request one shared recipe resource */
1797 sw_buf->num_elems = CPU_TO_LE16(1);
1798 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1799 ICE_AQC_RES_TYPE_S) |
1800 ICE_AQC_RES_TYPE_FLAG_SHARED);
1801 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the element's response field */
1804 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1805 ice_free(hw, sw_buf);
1810 /* ice_init_port_info - Initialize port_info with switch configuration data
1811 * @pi: pointer to port_info
1812 * @vsi_port_num: VSI number or port number
1813 * @type: Type of switch element (port or VSI)
1814 * @swid: switch ID of the switch the element is attached to
1815 * @pf_vf_num: PF or VF number
1816 * @is_vf: true if the element is a VF, false otherwise
1819 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1820 u16 swid, u16 pf_vf_num, bool is_vf)
/* physical ports get their lport/ownership recorded and default VSIs
 * marked invalid; any other element type is logged and ignored
 */
1823 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1824 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1826 pi->pf_vf_num = pf_vf_num;
1828 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1829 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1832 ice_debug(pi->hw, ICE_DBG_SW,
1833 "incorrect VSI/port type received\n");
1838 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1839 * @hw: pointer to the hardware structure
1841 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1843 struct ice_aqc_get_sw_cfg_resp *rbuf;
1844 enum ice_status status;
1845 u16 num_total_ports;
/* currently only a single port is expected per function */
1851 num_total_ports = 1;
1853 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1854 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1857 return ICE_ERR_NO_MEMORY;
1859 /* Multiple calls to ice_aq_get_sw_cfg may be required
1860 * to get all the switch configuration information. The need
1861 * for additional calls is indicated by ice_aq_get_sw_cfg
1862 * writing a non-zero value in req_desc
1865 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1866 &req_desc, &num_elems, NULL);
1871 for (i = 0; i < num_elems; i++) {
1872 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1873 u16 pf_vf_num, swid, vsi_port_num;
1877 ele = rbuf[i].elements;
/* decode the packed response element fields */
1878 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1879 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1881 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1882 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1884 swid = LE16_TO_CPU(ele->swid);
1886 if (LE16_TO_CPU(ele->pf_vf_num) &
1887 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1890 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1891 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1894 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1895 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1896 if (j == num_total_ports) {
1897 ice_debug(hw, ICE_DBG_SW,
1898 "more ports than expected\n");
1899 status = ICE_ERR_CFG;
1902 ice_init_port_info(hw->port_info,
1903 vsi_port_num, res_type, swid,
/* keep fetching until FW stops requesting continuation (req_desc == 0) */
1911 } while (req_desc && !status);
1914 ice_free(hw, (void *)rbuf);
1919 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1920 * @hw: pointer to the hardware structure
1921 * @fi: filter info structure to fill/update
1923 * This helper function populates the lb_en and lan_en elements of the provided
1924 * ice_fltr_info struct using the switch's type and characteristics of the
1925 * switch rule being configured.
1927 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* only Tx-flagged forwarding rules need the VEB/VEPA-dependent handling */
1931 if ((fi->flag & ICE_FLTR_TX) &&
1932 (fi->fltr_act == ICE_FWD_TO_VSI ||
1933 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1934 fi->fltr_act == ICE_FWD_TO_Q ||
1935 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1936 /* Setting LB for prune actions will result in replicated
1937 * packets to the internal switch that will be dropped.
1939 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1942 /* Set lan_en to TRUE if
1943 * 1. The switch is a VEB AND
1945 * 2.1 The lookup is a directional lookup like ethertype,
1946 * promiscuous, ethertype-MAC, promiscuous-VLAN
1947 * and default-port OR
1948 * 2.2 The lookup is VLAN, OR
1949 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1950 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1954 * The switch is a VEPA.
1956 * In all other cases, the LAN enable has to be set to false.
1959 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1960 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1961 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1962 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1963 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1964 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1965 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1966 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1967 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1968 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1977 * ice_fill_sw_rule - Helper function to fill switch rule structure
1978 * @hw: pointer to the hardware structure
1979 * @f_info: entry containing packet forwarding information
1980 * @s_rule: switch rule structure to be filled in based on mac_entry
1981 * @opc: switch rules population command type - pass in the command opcode
1984 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1985 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* sentinel: values > ICE_MAX_VLAN_ID mean "no VLAN to program" below */
1987 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* remove rules only need the rule index; no header or action to build */
1995 if (opc == ice_aqc_opc_remove_sw_rules) {
1996 s_rule->pdata.lkup_tx_rx.act = 0;
1997 s_rule->pdata.lkup_tx_rx.index =
1998 CPU_TO_LE16(f_info->fltr_rule_id);
1999 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2003 eth_hdr_sz = sizeof(dummy_eth_header);
2004 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2006 /* initialize the ether header with a dummy header */
2007 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2008 ice_fill_sw_info(hw, f_info);
/* translate the filter action into the single-action encoding */
2010 switch (f_info->fltr_act) {
2011 case ICE_FWD_TO_VSI:
2012 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2013 ICE_SINGLE_ACT_VSI_ID_M;
2014 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2015 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2016 ICE_SINGLE_ACT_VALID_BIT;
2018 case ICE_FWD_TO_VSI_LIST:
2019 act |= ICE_SINGLE_ACT_VSI_LIST;
2020 act |= (f_info->fwd_id.vsi_list_id <<
2021 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2022 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2023 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2024 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2025 ICE_SINGLE_ACT_VALID_BIT;
2028 act |= ICE_SINGLE_ACT_TO_Q;
2029 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2030 ICE_SINGLE_ACT_Q_INDEX_M;
2032 case ICE_DROP_PACKET:
2033 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2034 ICE_SINGLE_ACT_VALID_BIT;
2036 case ICE_FWD_TO_QGRP:
/* queue-group size is encoded as a power-of-two region exponent */
2037 q_rgn = f_info->qgrp_size > 0 ?
2038 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2039 act |= ICE_SINGLE_ACT_TO_Q;
2040 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2041 ICE_SINGLE_ACT_Q_INDEX_M;
2042 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2043 ICE_SINGLE_ACT_Q_REGION_M;
/* apply the lb_en/lan_en decisions made by ice_fill_sw_info */
2050 act |= ICE_SINGLE_ACT_LB_ENABLE;
2052 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* pick which match fields (DA / ethertype / VLAN) to patch into the
 * dummy header based on the lookup type
 */
2054 switch (f_info->lkup_type) {
2055 case ICE_SW_LKUP_MAC:
2056 daddr = f_info->l_data.mac.mac_addr;
2058 case ICE_SW_LKUP_VLAN:
2059 vlan_id = f_info->l_data.vlan.vlan_id;
2060 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2061 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2062 act |= ICE_SINGLE_ACT_PRUNE;
2063 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2066 case ICE_SW_LKUP_ETHERTYPE_MAC:
2067 daddr = f_info->l_data.ethertype_mac.mac_addr;
2069 case ICE_SW_LKUP_ETHERTYPE:
2070 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2071 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2073 case ICE_SW_LKUP_MAC_VLAN:
2074 daddr = f_info->l_data.mac_vlan.mac_addr;
2075 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2077 case ICE_SW_LKUP_PROMISC_VLAN:
2078 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2080 case ICE_SW_LKUP_PROMISC:
2081 daddr = f_info->l_data.mac_vlan.mac_addr;
2087 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2088 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2089 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2091 /* Recipe set depending on lookup type */
2092 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2093 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2094 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2097 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2098 ICE_NONDMA_TO_NONDMA);
/* program the VLAN TCI only when a real (<= 0xFFF) VLAN ID was selected */
2100 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2101 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2102 *off = CPU_TO_BE16(vlan_id);
2105 /* Create the switch rule with the final dummy Ethernet header */
2106 if (opc != ice_aqc_opc_update_sw_rules)
2107 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2111 * ice_add_marker_act
2112 * @hw: pointer to the hardware structure
2113 * @m_ent: the management entry for which sw marker needs to be added
2114 * @sw_marker: sw marker to tag the Rx descriptor with
2115 * @l_id: large action resource ID
2117 * Create a large action to hold software marker and update the switch rule
2118 * entry pointed by m_ent with newly created large action
2120 static enum ice_status
2121 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2122 u16 sw_marker, u16 l_id)
2124 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2125 /* For software marker we need 3 large actions
2126 * 1. FWD action: FWD TO VSI or VSI LIST
2127 * 2. GENERIC VALUE action to hold the profile ID
2128 * 3. GENERIC VALUE action to hold the software marker ID
2130 const u16 num_lg_acts = 3;
2131 enum ice_status status;
/* markers are only supported on MAC lookup rules */
2137 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2138 return ICE_ERR_PARAM;
2140 /* Create two back-to-back switch rules and submit them to the HW using
2141 * one memory buffer:
2145 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2146 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2147 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2149 return ICE_ERR_NO_MEMORY;
/* second rule lives immediately after the large action in the buffer */
2151 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2153 /* Fill in the first switch rule i.e. large action */
2154 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2155 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2156 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2158 /* First action VSI forwarding or VSI list forwarding depending on how
2161 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2162 m_ent->fltr_info.fwd_id.hw_vsi_id;
2164 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2165 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2166 ICE_LG_ACT_VSI_LIST_ID_M;
2167 if (m_ent->vsi_count > 1)
2168 act |= ICE_LG_ACT_VSI_LIST;
2169 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2171 /* Second action descriptor type */
2172 act = ICE_LG_ACT_GENERIC;
2174 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2175 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2177 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2178 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2180 /* Third action Marker value */
2181 act |= ICE_LG_ACT_GENERIC;
2182 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2183 ICE_LG_ACT_GENERIC_VALUE_M;
2185 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2187 /* call the fill switch rule to fill the lookup Tx Rx structure */
2188 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2189 ice_aqc_opc_update_sw_rules);
2191 /* Update the action to point to the large action ID */
2192 rx_tx->pdata.lkup_tx_rx.act =
2193 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2194 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2195 ICE_SINGLE_ACT_PTR_VAL_M));
2197 /* Use the filter rule ID of the previously created rule with single
2198 * act. Once the update happens, hardware will treat this as large
2201 rx_tx->pdata.lkup_tx_rx.index =
2202 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* submit both rules in one AQ call; remember the marker on success.
 * NOTE(review): the "if (!status)" guard before the bookkeeping below is
 * not visible in this extract — confirm.
 */
2204 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2205 ice_aqc_opc_update_sw_rules, NULL);
2207 m_ent->lg_act_idx = l_id;
2208 m_ent->sw_marker_id = sw_marker;
2211 ice_free(hw, lg_act);
2216 * ice_add_counter_act - add/update filter rule with counter action
2217 * @hw: pointer to the hardware structure
2218 * @m_ent: the management entry for which counter needs to be added
2219 * @counter_id: VLAN counter ID returned as part of allocate resource
2220 * @l_id: large action resource ID
2222 static enum ice_status
2223 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2224 u16 counter_id, u16 l_id)
2226 struct ice_aqc_sw_rules_elem *lg_act;
2227 struct ice_aqc_sw_rules_elem *rx_tx;
2228 enum ice_status status;
2229 /* 2 actions will be added while adding a large action counter */
2230 const int num_acts = 2;
/* counters are only supported on MAC lookup rules */
2237 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2238 return ICE_ERR_PARAM;
2240 /* Create two back-to-back switch rules and submit them to the HW using
2241 * one memory buffer:
2245 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2246 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2247 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2250 return ICE_ERR_NO_MEMORY;
/* lookup rule follows the large action in the same buffer */
2252 rx_tx = (struct ice_aqc_sw_rules_elem *)
2253 ((u8 *)lg_act + lg_act_size);
2255 /* Fill in the first switch rule i.e. large action */
2256 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2257 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2258 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2260 /* First action VSI forwarding or VSI list forwarding depending on how
2263 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2264 m_ent->fltr_info.fwd_id.hw_vsi_id;
2266 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2267 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2268 ICE_LG_ACT_VSI_LIST_ID_M;
2269 if (m_ent->vsi_count > 1)
2270 act |= ICE_LG_ACT_VSI_LIST;
2271 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2273 /* Second action counter ID */
2274 act = ICE_LG_ACT_STAT_COUNT;
2275 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2276 ICE_LG_ACT_STAT_COUNT_M;
2277 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2279 /* call the fill switch rule to fill the lookup Tx Rx structure */
2280 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2281 ice_aqc_opc_update_sw_rules);
/* redirect the lookup rule's action to point at the large action */
2283 act = ICE_SINGLE_ACT_PTR;
2284 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2285 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2287 /* Use the filter rule ID of the previously created rule with single
2288 * act. Once the update happens, hardware will treat this as large
2291 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2292 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* NOTE(review): the bookkeeping below is presumably guarded by
 * "if (!status)" on a line not visible in this extract — confirm.
 */
2294 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2295 ice_aqc_opc_update_sw_rules, NULL);
2297 m_ent->lg_act_idx = l_id;
2298 m_ent->counter_index = counter_id;
2301 ice_free(hw, lg_act);
2306 * ice_create_vsi_list_map
2307 * @hw: pointer to the hardware structure
2308 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2309 * @num_vsi: number of VSI handles in the array
2310 * @vsi_list_id: VSI list ID generated as part of allocate resource
2312 * Helper function to create a new entry of VSI list ID to VSI mapping
2313 * using the given VSI list ID
2315 static struct ice_vsi_list_map_info *
2316 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2319 struct ice_switch_info *sw = hw->switch_info;
2320 struct ice_vsi_list_map_info *v_map;
2323 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2328 v_map->vsi_list_id = vsi_list_id;
2330 for (i = 0; i < num_vsi; i++)
2331 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2333 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2338 * ice_update_vsi_list_rule
2339 * @hw: pointer to the hardware structure
2340 * @vsi_handle_arr: array of VSI handles to form a VSI list
2341 * @num_vsi: number of VSI handles in the array
2342 * @vsi_list_id: VSI list ID generated as part of allocate resource
2343 * @remove: Boolean value to indicate if this is a remove action
2344 * @opc: switch rules population command type - pass in the command opcode
2345 * @lkup_type: lookup type of the filter
2347 * Call AQ command to add a new switch rule or update existing switch rule
2348 * using the given VSI list ID
2350 static enum ice_status
2351 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2352 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2353 enum ice_sw_lkup_type lkup_type)
2355 struct ice_aqc_sw_rules_elem *s_rule;
2356 enum ice_status status;
2362 return ICE_ERR_PARAM;
2364 if (lkup_type == ICE_SW_LKUP_MAC ||
2365 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2366 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2367 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2368 lkup_type == ICE_SW_LKUP_PROMISC ||
2369 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2370 lkup_type == ICE_SW_LKUP_LAST)
2371 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2372 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2373 else if (lkup_type == ICE_SW_LKUP_VLAN)
2374 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2375 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2377 return ICE_ERR_PARAM;
2379 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2380 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2382 return ICE_ERR_NO_MEMORY;
2383 for (i = 0; i < num_vsi; i++) {
2384 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2385 status = ICE_ERR_PARAM;
2388 /* AQ call requires hw_vsi_id(s) */
2389 s_rule->pdata.vsi_list.vsi[i] =
2390 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2393 s_rule->type = CPU_TO_LE16(rule_type);
2394 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2395 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2397 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2400 ice_free(hw, s_rule);
2405 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2406 * @hw: pointer to the HW struct
2407 * @vsi_handle_arr: array of VSI handles to form a VSI list
2408 * @num_vsi: number of VSI handles in the array
2409 * @vsi_list_id: stores the ID of the VSI list to be created
2410 * @lkup_type: switch rule filter's lookup type
2412 static enum ice_status
2413 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2414 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2416 enum ice_status status;
2418 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2419 ice_aqc_opc_alloc_res);
2423 /* Update the newly created VSI list to include the specified VSIs */
2424 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2425 *vsi_list_id, false,
2426 ice_aqc_opc_add_sw_rules, lkup_type);
2430 * ice_create_pkt_fwd_rule
2431 * @hw: pointer to the hardware structure
2432 * @recp_list: corresponding filter management list
2433 * @f_entry: entry containing packet forwarding information
2435 * Create switch rule with given filter information and add an entry
2436 * to the corresponding filter management list to track this switch rule
2439 static enum ice_status
2440 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2441 struct ice_fltr_list_entry *f_entry)
2443 struct ice_fltr_mgmt_list_entry *fm_entry;
2444 struct ice_aqc_sw_rules_elem *s_rule;
2445 enum ice_status status;
2447 s_rule = (struct ice_aqc_sw_rules_elem *)
2448 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2450 return ICE_ERR_NO_MEMORY;
2451 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2452 ice_malloc(hw, sizeof(*fm_entry));
2454 status = ICE_ERR_NO_MEMORY;
2455 goto ice_create_pkt_fwd_rule_exit;
2458 fm_entry->fltr_info = f_entry->fltr_info;
2460 /* Initialize all the fields for the management entry */
2461 fm_entry->vsi_count = 1;
2462 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2463 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2464 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2466 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2467 ice_aqc_opc_add_sw_rules);
2469 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2470 ice_aqc_opc_add_sw_rules, NULL);
2472 ice_free(hw, fm_entry);
2473 goto ice_create_pkt_fwd_rule_exit;
2476 f_entry->fltr_info.fltr_rule_id =
2477 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2478 fm_entry->fltr_info.fltr_rule_id =
2479 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2481 /* The book keeping entries will get removed when base driver
2482 * calls remove filter AQ command
2484 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2486 ice_create_pkt_fwd_rule_exit:
2487 ice_free(hw, s_rule);
2492 * ice_update_pkt_fwd_rule
2493 * @hw: pointer to the hardware structure
2494 * @f_info: filter information for switch rule
2496 * Call AQ command to update a previously created switch rule with a
2499 static enum ice_status
2500 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2502 struct ice_aqc_sw_rules_elem *s_rule;
2503 enum ice_status status;
2505 s_rule = (struct ice_aqc_sw_rules_elem *)
2506 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2508 return ICE_ERR_NO_MEMORY;
2510 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2512 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2514 /* Update switch rule with new rule set to forward VSI list */
2515 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2516 ice_aqc_opc_update_sw_rules, NULL);
2518 ice_free(hw, s_rule);
2523 * ice_update_sw_rule_bridge_mode
2524 * @hw: pointer to the HW struct
2526 * Updates unicast switch filter rules based on VEB/VEPA mode
2528 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2530 struct ice_switch_info *sw = hw->switch_info;
2531 struct ice_fltr_mgmt_list_entry *fm_entry;
2532 enum ice_status status = ICE_SUCCESS;
2533 struct LIST_HEAD_TYPE *rule_head;
2534 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2536 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2537 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2539 ice_acquire_lock(rule_lock);
2540 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2542 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2543 u8 *addr = fi->l_data.mac.mac_addr;
2545 /* Update unicast Tx rules to reflect the selected
2548 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2549 (fi->fltr_act == ICE_FWD_TO_VSI ||
2550 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2551 fi->fltr_act == ICE_FWD_TO_Q ||
2552 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2553 status = ice_update_pkt_fwd_rule(hw, fi);
2559 ice_release_lock(rule_lock);
2565 * ice_add_update_vsi_list
2566 * @hw: pointer to the hardware structure
2567 * @m_entry: pointer to current filter management list entry
2568 * @cur_fltr: filter information from the book keeping entry
2569 * @new_fltr: filter information with the new VSI to be added
2571 * Call AQ command to add or update previously created VSI list with new VSI.
2573 * Helper function to do book keeping associated with adding filter information
2574 * The algorithm to do the book keeping is described below :
2575 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2576 * if only one VSI has been added till now
2577 * Allocate a new VSI list and add two VSIs
2578 * to this list using switch rule command
2579 * Update the previously created switch rule with the
2580 * newly created VSI list ID
2581 * if a VSI list was previously created
2582 * Add the new VSI to the previously created VSI list set
2583 * using the update switch rule command
2585 static enum ice_status
2586 ice_add_update_vsi_list(struct ice_hw *hw,
2587 struct ice_fltr_mgmt_list_entry *m_entry,
2588 struct ice_fltr_info *cur_fltr,
2589 struct ice_fltr_info *new_fltr)
2591 enum ice_status status = ICE_SUCCESS;
2592 u16 vsi_list_id = 0;
2594 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2595 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2596 return ICE_ERR_NOT_IMPL;
2598 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2599 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2600 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2601 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2602 return ICE_ERR_NOT_IMPL;
2604 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2605 /* Only one entry existed in the mapping and it was not already
2606 * a part of a VSI list. So, create a VSI list with the old and
2609 struct ice_fltr_info tmp_fltr;
2610 u16 vsi_handle_arr[2];
2612 /* A rule already exists with the new VSI being added */
2613 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2614 return ICE_ERR_ALREADY_EXISTS;
2616 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2617 vsi_handle_arr[1] = new_fltr->vsi_handle;
2618 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2620 new_fltr->lkup_type);
2624 tmp_fltr = *new_fltr;
2625 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2626 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2627 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2628 /* Update the previous switch rule of "MAC forward to VSI" to
2629 * "MAC fwd to VSI list"
2631 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2635 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2636 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2637 m_entry->vsi_list_info =
2638 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2641 /* If this entry was large action then the large action needs
2642 * to be updated to point to FWD to VSI list
2644 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2646 ice_add_marker_act(hw, m_entry,
2647 m_entry->sw_marker_id,
2648 m_entry->lg_act_idx);
2650 u16 vsi_handle = new_fltr->vsi_handle;
2651 enum ice_adminq_opc opcode;
2653 if (!m_entry->vsi_list_info)
2656 /* A rule already exists with the new VSI being added */
2657 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2660 /* Update the previously created VSI list set with
2661 * the new VSI ID passed in
2663 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2664 opcode = ice_aqc_opc_update_sw_rules;
2666 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2667 vsi_list_id, false, opcode,
2668 new_fltr->lkup_type);
2669 /* update VSI list mapping info with new VSI ID */
2671 ice_set_bit(vsi_handle,
2672 m_entry->vsi_list_info->vsi_map);
2675 m_entry->vsi_count++;
2680 * ice_find_rule_entry - Search a rule entry
2681 * @list_head: head of rule list
2682 * @f_info: rule information
2684 * Helper function to search for a given rule entry
2685 * Returns pointer to entry storing the rule if found
2687 static struct ice_fltr_mgmt_list_entry *
2688 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2689 struct ice_fltr_info *f_info)
2691 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2693 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2695 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2696 sizeof(f_info->l_data)) &&
2697 f_info->flag == list_itr->fltr_info.flag) {
2706 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2707 * @recp_list: VSI lists needs to be searched
2708 * @vsi_handle: VSI handle to be found in VSI list
2709 * @vsi_list_id: VSI list ID found containing vsi_handle
2711 * Helper function to search a VSI list with single entry containing given VSI
2712 * handle element. This can be extended further to search VSI list with more
2713 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2715 static struct ice_vsi_list_map_info *
2716 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2719 struct ice_vsi_list_map_info *map_info = NULL;
2720 struct LIST_HEAD_TYPE *list_head;
2722 list_head = &recp_list->filt_rules;
2723 if (recp_list->adv_rule) {
2724 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2726 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2727 ice_adv_fltr_mgmt_list_entry,
2729 if (list_itr->vsi_list_info) {
2730 map_info = list_itr->vsi_list_info;
2731 if (ice_is_bit_set(map_info->vsi_map,
2733 *vsi_list_id = map_info->vsi_list_id;
2739 struct ice_fltr_mgmt_list_entry *list_itr;
2741 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2742 ice_fltr_mgmt_list_entry,
2744 if (list_itr->vsi_count == 1 &&
2745 list_itr->vsi_list_info) {
2746 map_info = list_itr->vsi_list_info;
2747 if (ice_is_bit_set(map_info->vsi_map,
2749 *vsi_list_id = map_info->vsi_list_id;
2759 * ice_add_rule_internal - add rule for a given lookup type
2760 * @hw: pointer to the hardware structure
2761 * @recp_list: recipe list for which rule has to be added
2762 * @lport: logic port number on which function add rule
2763 * @f_entry: structure containing MAC forwarding information
2765 * Adds or updates the rule lists for a given recipe
2767 static enum ice_status
2768 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2769 u8 lport, struct ice_fltr_list_entry *f_entry)
2771 struct ice_fltr_info *new_fltr, *cur_fltr;
2772 struct ice_fltr_mgmt_list_entry *m_entry;
2773 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2774 enum ice_status status = ICE_SUCCESS;
2776 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2777 return ICE_ERR_PARAM;
2779 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2780 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2781 f_entry->fltr_info.fwd_id.hw_vsi_id =
2782 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2784 rule_lock = &recp_list->filt_rule_lock;
2786 ice_acquire_lock(rule_lock);
2787 new_fltr = &f_entry->fltr_info;
2788 if (new_fltr->flag & ICE_FLTR_RX)
2789 new_fltr->src = lport;
2790 else if (new_fltr->flag & ICE_FLTR_TX)
2792 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2794 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2796 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2797 goto exit_add_rule_internal;
2800 cur_fltr = &m_entry->fltr_info;
2801 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2803 exit_add_rule_internal:
2804 ice_release_lock(rule_lock);
2809 * ice_remove_vsi_list_rule
2810 * @hw: pointer to the hardware structure
2811 * @vsi_list_id: VSI list ID generated as part of allocate resource
2812 * @lkup_type: switch rule filter lookup type
2814 * The VSI list should be emptied before this function is called to remove the
2817 static enum ice_status
2818 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2819 enum ice_sw_lkup_type lkup_type)
2821 struct ice_aqc_sw_rules_elem *s_rule;
2822 enum ice_status status;
2825 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2826 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2828 return ICE_ERR_NO_MEMORY;
2830 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2831 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2833 /* Free the vsi_list resource that we allocated. It is assumed that the
2834 * list is empty at this point.
2836 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2837 ice_aqc_opc_free_res);
2839 ice_free(hw, s_rule);
2844 * ice_rem_update_vsi_list
2845 * @hw: pointer to the hardware structure
2846 * @vsi_handle: VSI handle of the VSI to remove
2847 * @fm_list: filter management entry for which the VSI list management needs to
2850 static enum ice_status
2851 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2852 struct ice_fltr_mgmt_list_entry *fm_list)
2854 enum ice_sw_lkup_type lkup_type;
2855 enum ice_status status = ICE_SUCCESS;
2858 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2859 fm_list->vsi_count == 0)
2860 return ICE_ERR_PARAM;
2862 /* A rule with the VSI being removed does not exist */
2863 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2864 return ICE_ERR_DOES_NOT_EXIST;
2866 lkup_type = fm_list->fltr_info.lkup_type;
2867 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2868 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2869 ice_aqc_opc_update_sw_rules,
2874 fm_list->vsi_count--;
2875 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2877 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2878 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2879 struct ice_vsi_list_map_info *vsi_list_info =
2880 fm_list->vsi_list_info;
2883 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2885 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2886 return ICE_ERR_OUT_OF_RANGE;
2888 /* Make sure VSI list is empty before removing it below */
2889 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2891 ice_aqc_opc_update_sw_rules,
2896 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2897 tmp_fltr_info.fwd_id.hw_vsi_id =
2898 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2899 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2900 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2902 ice_debug(hw, ICE_DBG_SW,
2903 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2904 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2908 fm_list->fltr_info = tmp_fltr_info;
2911 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2912 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2913 struct ice_vsi_list_map_info *vsi_list_info =
2914 fm_list->vsi_list_info;
2916 /* Remove the VSI list since it is no longer used */
2917 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2919 ice_debug(hw, ICE_DBG_SW,
2920 "Failed to remove VSI list %d, error %d\n",
2921 vsi_list_id, status);
2925 LIST_DEL(&vsi_list_info->list_entry);
2926 ice_free(hw, vsi_list_info);
2927 fm_list->vsi_list_info = NULL;
2934 * ice_remove_rule_internal - Remove a filter rule of a given type
2936 * @hw: pointer to the hardware structure
2937 * @recp_list: recipe list for which the rule needs to removed
2938 * @f_entry: rule entry containing filter information
2940 static enum ice_status
2941 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2942 struct ice_fltr_list_entry *f_entry)
2944 struct ice_fltr_mgmt_list_entry *list_elem;
2945 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2946 enum ice_status status = ICE_SUCCESS;
2947 bool remove_rule = false;
2950 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2951 return ICE_ERR_PARAM;
2952 f_entry->fltr_info.fwd_id.hw_vsi_id =
2953 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2955 rule_lock = &recp_list->filt_rule_lock;
2956 ice_acquire_lock(rule_lock);
2957 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2958 &f_entry->fltr_info);
2960 status = ICE_ERR_DOES_NOT_EXIST;
2964 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2966 } else if (!list_elem->vsi_list_info) {
2967 status = ICE_ERR_DOES_NOT_EXIST;
2969 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2970 /* a ref_cnt > 1 indicates that the vsi_list is being
2971 * shared by multiple rules. Decrement the ref_cnt and
2972 * remove this rule, but do not modify the list, as it
2973 * is in-use by other rules.
2975 list_elem->vsi_list_info->ref_cnt--;
2978 /* a ref_cnt of 1 indicates the vsi_list is only used
2979 * by one rule. However, the original removal request is only
2980 * for a single VSI. Update the vsi_list first, and only
2981 * remove the rule if there are no further VSIs in this list.
2983 vsi_handle = f_entry->fltr_info.vsi_handle;
2984 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2987 /* if VSI count goes to zero after updating the VSI list */
2988 if (list_elem->vsi_count == 0)
2993 /* Remove the lookup rule */
2994 struct ice_aqc_sw_rules_elem *s_rule;
2996 s_rule = (struct ice_aqc_sw_rules_elem *)
2997 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2999 status = ICE_ERR_NO_MEMORY;
3003 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3004 ice_aqc_opc_remove_sw_rules);
3006 status = ice_aq_sw_rules(hw, s_rule,
3007 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3008 ice_aqc_opc_remove_sw_rules, NULL);
3010 /* Remove a book keeping from the list */
3011 ice_free(hw, s_rule);
3016 LIST_DEL(&list_elem->list_entry);
3017 ice_free(hw, list_elem);
3020 ice_release_lock(rule_lock);
3025 * ice_aq_get_res_alloc - get allocated resources
3026 * @hw: pointer to the HW struct
3027 * @num_entries: pointer to u16 to store the number of resource entries returned
3028 * @buf: pointer to user-supplied buffer
3029 * @buf_size: size of buff
3030 * @cd: pointer to command details structure or NULL
3032 * The user-supplied buffer must be large enough to store the resource
3033 * information for all resource types. Each resource type is an
3034 * ice_aqc_get_res_resp_data_elem structure.
3037 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3038 u16 buf_size, struct ice_sq_cd *cd)
3040 struct ice_aqc_get_res_alloc *resp;
3041 enum ice_status status;
3042 struct ice_aq_desc desc;
3045 return ICE_ERR_BAD_PTR;
3047 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3048 return ICE_ERR_INVAL_SIZE;
3050 resp = &desc.params.get_res;
3052 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3053 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3055 if (!status && num_entries)
3056 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3062 * ice_aq_get_res_descs - get allocated resource descriptors
3063 * @hw: pointer to the hardware structure
3064 * @num_entries: number of resource entries in buffer
3065 * @buf: Indirect buffer to hold data parameters and response
3066 * @buf_size: size of buffer for indirect commands
3067 * @res_type: resource type
3068 * @res_shared: is resource shared
3069 * @desc_id: input - first desc ID to start; output - next desc ID
3070 * @cd: pointer to command details structure or NULL
3073 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3074 struct ice_aqc_get_allocd_res_desc_resp *buf,
3075 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3076 struct ice_sq_cd *cd)
3078 struct ice_aqc_get_allocd_res_desc *cmd;
3079 struct ice_aq_desc desc;
3080 enum ice_status status;
3082 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3084 cmd = &desc.params.get_res_desc;
3087 return ICE_ERR_PARAM;
3089 if (buf_size != (num_entries * sizeof(*buf)))
3090 return ICE_ERR_PARAM;
3092 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3094 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3095 ICE_AQC_RES_TYPE_M) | (res_shared ?
3096 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3097 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3099 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3101 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3107 * ice_add_mac_rule - Add a MAC address based filter rule
3108 * @hw: pointer to the hardware structure
3109 * @m_list: list of MAC addresses and forwarding information
3110 * @sw: pointer to switch info struct for which function add rule
3111 * @lport: logic port number on which function add rule
3113 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3114 * multiple unicast addresses, the function assumes that all the
3115 * addresses are unique in a given add_mac call. It doesn't
3116 * check for duplicates in this case, removing duplicates from a given
3117 * list should be taken care of in the caller of this function.
3119 static enum ice_status
3120 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3121 struct ice_switch_info *sw, u8 lport)
3123 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3124 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3125 struct ice_fltr_list_entry *m_list_itr;
3126 struct LIST_HEAD_TYPE *rule_head;
3127 u16 elem_sent, total_elem_left;
3128 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3129 enum ice_status status = ICE_SUCCESS;
3130 u16 num_unicast = 0;
3134 rule_lock = &recp_list->filt_rule_lock;
3135 rule_head = &recp_list->filt_rules;
3137 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3139 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3143 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3144 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3145 if (!ice_is_vsi_valid(hw, vsi_handle))
3146 return ICE_ERR_PARAM;
3147 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3148 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3149 /* update the src in case it is VSI num */
3150 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3151 return ICE_ERR_PARAM;
3152 m_list_itr->fltr_info.src = hw_vsi_id;
3153 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3154 IS_ZERO_ETHER_ADDR(add))
3155 return ICE_ERR_PARAM;
3156 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3157 /* Don't overwrite the unicast address */
3158 ice_acquire_lock(rule_lock);
3159 if (ice_find_rule_entry(rule_head,
3160 &m_list_itr->fltr_info)) {
3161 ice_release_lock(rule_lock);
3162 return ICE_ERR_ALREADY_EXISTS;
3164 ice_release_lock(rule_lock);
3166 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3167 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3168 m_list_itr->status =
3169 ice_add_rule_internal(hw, recp_list, lport,
3171 if (m_list_itr->status)
3172 return m_list_itr->status;
3176 ice_acquire_lock(rule_lock);
3177 /* Exit if no suitable entries were found for adding bulk switch rule */
3179 status = ICE_SUCCESS;
3180 goto ice_add_mac_exit;
3184 /* Allocate switch rule buffer for the bulk update for unicast */
3185 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3186 s_rule = (struct ice_aqc_sw_rules_elem *)
3187 ice_calloc(hw, num_unicast, s_rule_size);
3189 status = ICE_ERR_NO_MEMORY;
3190 goto ice_add_mac_exit;
3194 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3196 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3197 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3199 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3200 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3201 ice_aqc_opc_add_sw_rules);
3202 r_iter = (struct ice_aqc_sw_rules_elem *)
3203 ((u8 *)r_iter + s_rule_size);
3207 /* Call AQ bulk switch rule update for all unicast addresses */
3209 /* Call AQ switch rule in AQ_MAX chunk */
3210 for (total_elem_left = num_unicast; total_elem_left > 0;
3211 total_elem_left -= elem_sent) {
3212 struct ice_aqc_sw_rules_elem *entry = r_iter;
3214 elem_sent = min(total_elem_left,
3215 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3216 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3217 elem_sent, ice_aqc_opc_add_sw_rules,
3220 goto ice_add_mac_exit;
3221 r_iter = (struct ice_aqc_sw_rules_elem *)
3222 ((u8 *)r_iter + (elem_sent * s_rule_size));
3225 /* Fill up rule ID based on the value returned from FW */
3227 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3229 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3230 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3231 struct ice_fltr_mgmt_list_entry *fm_entry;
3233 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3234 f_info->fltr_rule_id =
3235 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3236 f_info->fltr_act = ICE_FWD_TO_VSI;
3237 /* Create an entry to track this MAC address */
3238 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3239 ice_malloc(hw, sizeof(*fm_entry));
3241 status = ICE_ERR_NO_MEMORY;
3242 goto ice_add_mac_exit;
3244 fm_entry->fltr_info = *f_info;
3245 fm_entry->vsi_count = 1;
3246 /* The book keeping entries will get removed when
3247 * base driver calls remove filter AQ command
3250 LIST_ADD(&fm_entry->list_entry, rule_head);
3251 r_iter = (struct ice_aqc_sw_rules_elem *)
3252 ((u8 *)r_iter + s_rule_size);
3257 ice_release_lock(rule_lock);
3259 ice_free(hw, s_rule);
3264 * ice_add_mac - Add a MAC address based filter rule
3265 * @hw: pointer to the hardware structure
3266 * @m_list: list of MAC addresses and forwarding information
3268 * Function add MAC rule for logical port from HW struct
3271 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3274 return ICE_ERR_PARAM;
3276 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3277 hw->port_info->lport);
3281 * ice_add_vlan_internal - Add one VLAN based filter rule
3282 * @hw: pointer to the hardware structure
3283 * @recp_list: recipe list for which rule has to be added
3284 * @f_entry: filter entry containing one VLAN information
3286 static enum ice_status
3287 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3288 struct ice_fltr_list_entry *f_entry)
/* Adds a single VLAN filter rule. A VLAN rule forwards to a VSI *list*
 * (not a single VSI) so multiple VSIs can share one pruning rule for the
 * same VLAN ID; this function either creates a new list, reuses an
 * existing single-VSI list, or clones a shared list into a new one.
 * NOTE(review): this extract elides source lines (numbering jumps, e.g.
 * 3316->3318, 3326->3330) - confirm the full control flow upstream.
 */
3290 struct ice_fltr_mgmt_list_entry *v_list_itr;
3291 struct ice_fltr_info *new_fltr, *cur_fltr;
3292 enum ice_sw_lkup_type lkup_type;
3293 u16 vsi_list_id = 0, vsi_handle;
3294 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3295 enum ice_status status = ICE_SUCCESS;
3297 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3298 return ICE_ERR_PARAM;
3300 f_entry->fltr_info.fwd_id.hw_vsi_id =
3301 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle)
3302 new_fltr = &f_entry->fltr_info;
3304 /* VLAN ID should only be 12 bits */
3305 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3306 return ICE_ERR_PARAM;
3308 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3309 return ICE_ERR_PARAM;
3311 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3312 lkup_type = new_fltr->lkup_type;
3313 vsi_handle = new_fltr->vsi_handle;
3314 rule_lock = &recp_list->filt_rule_lock;
3315 ice_acquire_lock(rule_lock);
3316 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* NOTE(review): an `if (!v_list_itr) {` guard presumably opens here
 * (elided) - the branch below handles "no rule exists yet for this VLAN".
 */
3318 struct ice_vsi_list_map_info *map_info = NULL;
3320 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3321 /* All VLAN pruning rules use a VSI list. Check if
3322 * there is already a VSI list containing VSI that we
3323 * want to add. If found, use the same vsi_list_id for
3324 * this new VLAN rule or else create a new list.
3326 map_info = ice_find_vsi_list_entry(recp_list,
3330 status = ice_create_vsi_list_rule(hw,
3338 /* Convert the action to forwarding to a VSI list. */
3339 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3340 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3343 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3345 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
/* The rule we just created must be findable; otherwise fail hard. */
3348 status = ICE_ERR_DOES_NOT_EXIST;
3351 /* reuse VSI list for new rule and increment ref_cnt */
3353 v_list_itr->vsi_list_info = map_info;
3354 map_info->ref_cnt++;
3356 v_list_itr->vsi_list_info =
3357 ice_create_vsi_list_map(hw, &vsi_handle,
3361 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3362 /* Update existing VSI list to add new VSI ID only if it used
3365 cur_fltr = &v_list_itr->fltr_info;
3366 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3369 /* If VLAN rule exists and VSI list being used by this rule is
3370 * referenced by more than 1 VLAN rule. Then create a new VSI
3371 * list appending previous VSI with new VSI and update existing
3372 * VLAN rule to point to new VSI list ID
3374 struct ice_fltr_info tmp_fltr;
3375 u16 vsi_handle_arr[2];
3378 /* Current implementation only supports reusing VSI list with
3379 * one VSI count. We should never hit below condition
3381 if (v_list_itr->vsi_count > 1 &&
3382 v_list_itr->vsi_list_info->ref_cnt > 1) {
3383 ice_debug(hw, ICE_DBG_SW,
3384 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3385 status = ICE_ERR_CFG;
/* The shared single-VSI list holds exactly one handle; fetch it. */
3390 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3393 /* A rule already exists with the new VSI being added */
3394 if (cur_handle == vsi_handle) {
3395 status = ICE_ERR_ALREADY_EXISTS;
3399 vsi_handle_arr[0] = cur_handle;
3400 vsi_handle_arr[1] = vsi_handle;
3401 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3402 &vsi_list_id, lkup_type);
3406 tmp_fltr = v_list_itr->fltr_info;
3407 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3408 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3409 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3410 /* Update the previous switch rule to a new VSI list which
3411 * includes current VSI that is requested
3413 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3417 /* before overriding VSI list map info. decrement ref_cnt of
3420 v_list_itr->vsi_list_info->ref_cnt--;
3422 /* now update to newly created list */
3423 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3424 v_list_itr->vsi_list_info =
3425 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3427 v_list_itr->vsi_count++;
3431 ice_release_lock(rule_lock);
3436 * ice_add_vlan_rule - Add VLAN based filter rule
3437 * @hw: pointer to the hardware structure
3438 * @v_list: list of VLAN entries and forwarding information
3439 * @sw: pointer to switch info struct for which function add rule
3441 static enum ice_status
3442 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3443 struct ice_switch_info *sw)
3445 struct ice_fltr_list_entry *v_list_itr;
3446 struct ice_sw_recipe *recp_list;
3448 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3449 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
/* Every entry must be a VLAN lookup; anything else is a caller bug. */
3451 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3452 return ICE_ERR_PARAM;
/* VLAN rules are always programmed as Tx-side filters. */
3453 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3454 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
/* Per-entry status is recorded; the first failure aborts the walk. */
3456 if (v_list_itr->status)
3457 return v_list_itr->status;
3463 * ice_add_vlan - Add a VLAN based filter rule
3464 * @hw: pointer to the hardware structure
3465 * @v_list: list of VLAN and forwarding information
3467 * Function add VLAN rule for logical port from HW struct
3470 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
/* NOTE(review): the elided line above this return presumably checks
 * `!v_list || !hw` (matching ice_add_mac_vlan) - confirm upstream. */
3473 return ICE_ERR_PARAM;
3475 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3479 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3480 * @hw: pointer to the hardware structure
3481 * @mv_list: list of MAC and VLAN filters
3483 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3484 * pruning bits enabled, then it is the responsibility of the caller to make
3485 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3486 * VLAN won't be received on that VSI otherwise.
3489 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3491 struct ice_fltr_list_entry *mv_list_itr;
3492 struct ice_sw_recipe *recp_list;
3494 if (!mv_list || !hw)
3495 return ICE_ERR_PARAM;
3497 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3498 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3500 enum ice_sw_lkup_type l_type =
3501 mv_list_itr->fltr_info.lkup_type;
3503 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3504 return ICE_ERR_PARAM;
/* MAC-VLAN rules are programmed as Tx-side filters on this lport. */
3505 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3506 mv_list_itr->status =
3507 ice_add_rule_internal(hw, recp_list,
3508 hw->port_info->lport,
/* First failing entry aborts; earlier entries keep their status. */
3510 if (mv_list_itr->status)
3511 return mv_list_itr->status;
3517 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3518 * @hw: pointer to the hardware structure
3519 * @em_list: list of ether type MAC filter, MAC is optional
3520 * @sw: pointer to switch info struct for which function add rule
3521 * @lport: logic port number on which function add rule
3523 * This function requires the caller to populate the entries in
3524 * the filter list with the necessary fields (including flags to
3525 * indicate Tx or Rx rules).
3527 static enum ice_status
3528 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3529 struct ice_switch_info *sw, u8 lport)
3531 struct ice_fltr_list_entry *em_list_itr;
3533 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3535 struct ice_sw_recipe *recp_list;
3536 enum ice_sw_lkup_type l_type;
3538 l_type = em_list_itr->fltr_info.lkup_type;
/* recp_list is indexed by the entry's own lookup type, so ETHERTYPE
 * and ETHERTYPE_MAC entries may be mixed in one list. */
3539 recp_list = &sw->recp_list[l_type];
3541 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3542 l_type != ICE_SW_LKUP_ETHERTYPE)
3543 return ICE_ERR_PARAM;
3545 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3548 if (em_list_itr->status)
3549 return em_list_itr->status;
3556 * ice_add_eth_mac - Add a ethertype based filter rule
3557 * @hw: pointer to the hardware structure
3558 * @em_list: list of ethertype and forwarding information
3560 * Function add ethertype rule for logical port from HW struct
3562 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3564 if (!em_list || !hw)
3565 return ICE_ERR_PARAM;
/* Thin wrapper: resolves switch info and lport from the HW struct. */
3567 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3568 hw->port_info->lport);
3572 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3573 * @hw: pointer to the hardware structure
3574 * @em_list: list of ethertype or ethertype MAC entries
3575 * @sw: pointer to switch info struct for which function add rule
3577 static enum ice_status
3578 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3579 struct ice_switch_info *sw)
3581 struct ice_fltr_list_entry *em_list_itr, *tmp;
/* _SAFE iteration: removal may detach the current entry. */
3583 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3585 struct ice_sw_recipe *recp_list;
3586 enum ice_sw_lkup_type l_type;
3588 l_type = em_list_itr->fltr_info.lkup_type;
3590 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3591 l_type != ICE_SW_LKUP_ETHERTYPE)
3592 return ICE_ERR_PARAM;
3594 recp_list = &sw->recp_list[l_type];
3595 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3597 if (em_list_itr->status)
3598 return em_list_itr->status;
3604 * ice_remove_eth_mac - remove a ethertype based filter rule
3605 * @hw: pointer to the hardware structure
3606 * @em_list: list of ethertype and forwarding information
3610 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3612 if (!em_list || !hw)
3613 return ICE_ERR_PARAM;
/* Thin wrapper over the _rule variant using hw's switch info. */
3615 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3619 * ice_rem_sw_rule_info
3620 * @hw: pointer to the hardware structure
3621 * @rule_head: pointer to the switch list structure that we want to delete
3624 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3626 if (!LIST_EMPTY(rule_head)) {
3627 struct ice_fltr_mgmt_list_entry *entry;
3628 struct ice_fltr_mgmt_list_entry *tmp;
/* Unlink and free every management entry; software bookkeeping only
 * (no admin-queue calls are made here). */
3630 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3631 ice_fltr_mgmt_list_entry, list_entry) {
3632 LIST_DEL(&entry->list_entry);
3633 ice_free(hw, entry);
3639 * ice_rem_adv_rule_info
3640 * @hw: pointer to the hardware structure
3641 * @rule_head: pointer to the switch list structure that we want to delete
3644 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3646 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3647 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3649 if (LIST_EMPTY(rule_head))
3652 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3653 ice_adv_fltr_mgmt_list_entry, list_entry) {
3654 LIST_DEL(&lst_itr->list_entry);
/* Advanced entries own a separately allocated lookup array; free it
 * before freeing the entry itself. */
3655 ice_free(hw, lst_itr->lkups);
3656 ice_free(hw, lst_itr);
3661 * ice_rem_all_sw_rules_info
3662 * @hw: pointer to the hardware structure
3664 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3666 struct ice_switch_info *sw = hw->switch_info;
3669 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3670 struct LIST_HEAD_TYPE *rule_head;
3672 rule_head = &sw->recp_list[i].filt_rules;
/* Basic and advanced recipes use different entry layouts, so each
 * needs its matching teardown helper. */
3673 if (!sw->recp_list[i].adv_rule)
3674 ice_rem_sw_rule_info(hw, rule_head);
3676 ice_rem_adv_rule_info(hw, rule_head);
3681 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3682 * @pi: pointer to the port_info structure
3683 * @vsi_handle: VSI handle to set as default
3684 * @set: true to add the above mentioned switch rule, false to remove it
3685 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3687 * add filter rule to set/unset given VSI as default VSI for the switch
3688 * (represented by swid)
3691 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3694 struct ice_aqc_sw_rules_elem *s_rule;
3695 struct ice_fltr_info f_info;
3696 struct ice_hw *hw = pi->hw;
3697 enum ice_adminq_opc opcode;
3698 enum ice_status status;
3702 if (!ice_is_vsi_valid(hw, vsi_handle))
3703 return ICE_ERR_PARAM;
3704 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Adding a rule needs room for the dummy ethernet header; removing
 * one only needs the rule element itself. */
3706 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3707 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3708 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3710 return ICE_ERR_NO_MEMORY;
3712 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3714 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3715 f_info.flag = direction;
3716 f_info.fltr_act = ICE_FWD_TO_VSI;
3717 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules match on the logical port; Tx default rules match
 * on the (HW) VSI as the source. */
3719 if (f_info.flag & ICE_FLTR_RX) {
3720 f_info.src = pi->lport;
3721 f_info.src_id = ICE_SRC_ID_LPORT;
3723 f_info.fltr_rule_id =
3724 pi->dflt_rx_vsi_rule_id;
3725 } else if (f_info.flag & ICE_FLTR_TX) {
3726 f_info.src_id = ICE_SRC_ID_VSI;
3727 f_info.src = hw_vsi_id;
3729 f_info.fltr_rule_id =
3730 pi->dflt_tx_vsi_rule_id;
3734 opcode = ice_aqc_opc_add_sw_rules;
3736 opcode = ice_aqc_opc_remove_sw_rules;
3738 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3740 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3741 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache the FW-assigned rule index in port_info so the
 * rule can be referenced/removed later. */
3744 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3746 if (f_info.flag & ICE_FLTR_TX) {
3747 pi->dflt_tx_vsi_num = hw_vsi_id;
3748 pi->dflt_tx_vsi_rule_id = index;
3749 } else if (f_info.flag & ICE_FLTR_RX) {
3750 pi->dflt_rx_vsi_num = hw_vsi_id;
3751 pi->dflt_rx_vsi_rule_id = index;
/* NOTE(review): elided branch (3752-3753) - presumably the `!set`
 * path that invalidates the cached defaults below; confirm upstream. */
3754 if (f_info.flag & ICE_FLTR_TX) {
3755 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3756 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3757 } else if (f_info.flag & ICE_FLTR_RX) {
3758 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3759 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3764 ice_free(hw, s_rule);
3769 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3770 * @list_head: head of rule list
3771 * @f_info: rule information
3773 * Helper function to search for a unicast rule entry - this is to be used
3774 * to remove unicast MAC filter that is not shared with other VSIs on the
3777 * Returns pointer to entry storing the rule if found
3779 static struct ice_fltr_mgmt_list_entry *
3780 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3781 struct ice_fltr_info *f_info)
3783 struct ice_fltr_mgmt_list_entry *list_itr;
3785 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match requires identical lookup data, the same HW VSI, and the same
 * direction flag - unlike the generic lookup, the VSI must match too. */
3787 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3788 sizeof(f_info->l_data)) &&
3789 f_info->fwd_id.hw_vsi_id ==
3790 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3791 f_info->flag == list_itr->fltr_info.flag)
3798 * ice_remove_mac_rule - remove a MAC based filter rule
3799 * @hw: pointer to the hardware structure
3800 * @m_list: list of MAC addresses and forwarding information
3801 * @recp_list: list from which function remove MAC address
3803 * This function removes either a MAC filter rule or a specific VSI from a
3804 * VSI list for a multicast MAC address.
3806 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3807 * ice_add_mac. Caller should be aware that this call will only work if all
3808 * the entries passed into m_list were added previously. It will not attempt to
3809 * do a partial remove of entries that were found.
3811 static enum ice_status
3812 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3813 struct ice_sw_recipe *recp_list)
3815 struct ice_fltr_list_entry *list_itr, *tmp;
3816 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3819 return ICE_ERR_PARAM;
3821 rule_lock = &recp_list->filt_rule_lock;
3822 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3824 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3825 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3828 if (l_type != ICE_SW_LKUP_MAC)
3829 return ICE_ERR_PARAM;
3831 vsi_handle = list_itr->fltr_info.vsi_handle;
3832 if (!ice_is_vsi_valid(hw, vsi_handle))
3833 return ICE_ERR_PARAM;
3835 list_itr->fltr_info.fwd_id.hw_vsi_id =
3836 ice_get_hw_vsi_num(hw, vsi_handle);
/* When unicast addresses are not shared across VSIs, only remove the
 * rule if this exact (addr, VSI, flag) rule exists - see helper above. */
3837 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3838 /* Don't remove the unicast address that belongs to
3839 * another VSI on the switch, since it is not being
3842 ice_acquire_lock(rule_lock);
3843 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3844 &list_itr->fltr_info)) {
3845 ice_release_lock(rule_lock);
3846 return ICE_ERR_DOES_NOT_EXIST;
3848 ice_release_lock(rule_lock);
3850 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3852 if (list_itr->status)
3853 return list_itr->status;
3859 * ice_remove_mac - remove a MAC address based filter rule
3860 * @hw: pointer to the hardware structure
3861 * @m_list: list of MAC addresses and forwarding information
3865 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3867 struct ice_sw_recipe *recp_list;
/* Thin wrapper: selects the MAC recipe; the _rule variant handles a
 * NULL m_list (visible in its early ICE_ERR_PARAM return). */
3869 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3870 return ice_remove_mac_rule(hw, m_list, recp_list);
3874 * ice_remove_vlan_rule - Remove VLAN based filter rule
3875 * @hw: pointer to the hardware structure
3876 * @v_list: list of VLAN entries and forwarding information
3877 * @recp_list: list from which function remove VLAN
3879 static enum ice_status
3880 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3881 struct ice_sw_recipe *recp_list)
3883 struct ice_fltr_list_entry *v_list_itr, *tmp;
3885 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3887 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3889 if (l_type != ICE_SW_LKUP_VLAN)
3890 return ICE_ERR_PARAM;
3891 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
/* First failing entry aborts the remaining removals. */
3893 if (v_list_itr->status)
3894 return v_list_itr->status;
3900 * ice_remove_vlan - remove a VLAN address based filter rule
3901 * @hw: pointer to the hardware structure
3902 * @v_list: list of VLAN and forwarding information
3906 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3908 struct ice_sw_recipe *recp_list;
/* NOTE(review): the elided condition above this return presumably
 * checks `!v_list || !hw` - confirm upstream. */
3911 return ICE_ERR_PARAM;
3913 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3914 return ice_remove_vlan_rule(hw, v_list, recp_list);
3918 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3919 * @hw: pointer to the hardware structure
3920 * @v_list: list of MAC VLAN entries and forwarding information
3923 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3925 struct ice_fltr_list_entry *v_list_itr, *tmp;
3926 struct ice_sw_recipe *recp_list;
3929 return ICE_ERR_PARAM;
3931 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3932 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3934 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3936 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3937 return ICE_ERR_PARAM;
3938 v_list_itr->status =
3939 ice_remove_rule_internal(hw, recp_list,
/* Abort on the first entry whose removal fails. */
3941 if (v_list_itr->status)
3942 return v_list_itr->status;
3948 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3949 * @fm_entry: filter entry to inspect
3950 * @vsi_handle: VSI handle to compare with filter info
3953 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
/* True when the rule forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle. */
3955 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3956 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3957 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3958 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3963 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3964 * @hw: pointer to the hardware structure
3965 * @vsi_handle: VSI handle to remove filters from
3966 * @vsi_list_head: pointer to the list to add entry to
3967 * @fi: pointer to fltr_info of filter entry to copy & add
3969 * Helper function, used when creating a list of filters to remove from
3970 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3971 * original filter entry, with the exception of fltr_info.fltr_act and
3972 * fltr_info.fwd_id fields. These are set such that later logic can
3973 * extract which VSI to remove the fltr from, and pass on that information.
3975 static enum ice_status
3976 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3977 struct LIST_HEAD_TYPE *vsi_list_head,
3978 struct ice_fltr_info *fi)
3980 struct ice_fltr_list_entry *tmp;
3982 /* this memory is freed up in the caller function
3983 * once filters for this VSI are removed
3985 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3987 return ICE_ERR_NO_MEMORY;
3989 tmp->fltr_info = *fi;
3991 /* Overwrite these fields to indicate which VSI to remove filter from,
3992 * so find and remove logic can extract the information from the
3993 * list entries. Note that original entries will still have proper
3996 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3997 tmp->fltr_info.vsi_handle = vsi_handle;
3998 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4000 LIST_ADD(&tmp->list_entry, vsi_list_head);
4006 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4007 * @hw: pointer to the hardware structure
4008 * @vsi_handle: VSI handle to remove filters from
4009 * @lkup_list_head: pointer to the list that has certain lookup type filters
4010 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4012 * Locates all filters in lkup_list_head that are used by the given VSI,
4013 * and adds COPIES of those entries to vsi_list_head (intended to be used
4014 * to remove the listed filters).
4015 * Note that this means all entries in vsi_list_head must be explicitly
4016 * deallocated by the caller when done with list.
4018 static enum ice_status
4019 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4020 struct LIST_HEAD_TYPE *lkup_list_head,
4021 struct LIST_HEAD_TYPE *vsi_list_head)
4023 struct ice_fltr_mgmt_list_entry *fm_entry;
4024 enum ice_status status = ICE_SUCCESS;
4026 /* check to make sure VSI ID is valid and within boundary */
4027 if (!ice_is_vsi_valid(hw, vsi_handle))
4028 return ICE_ERR_PARAM;
4030 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4031 ice_fltr_mgmt_list_entry, list_entry) {
4032 struct ice_fltr_info *fi;
4034 fi = &fm_entry->fltr_info;
/* Skip entries that do not apply to this VSI. */
4035 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4038 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4047 * ice_determine_promisc_mask
4048 * @fi: filter info to parse
4050 * Helper function to determine which ICE_PROMISC_ mask corresponds
4051 * to given filter into.
4053 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
/* NOTE(review): `vid` is declared here but its use (the VLAN branch
 * condition) is in elided lines (4072-ish) - confirm upstream. */
4055 u16 vid = fi->l_data.mac_vlan.vlan_id;
4056 u8 *macaddr = fi->l_data.mac.mac_addr;
4057 bool is_tx_fltr = false;
4058 u8 promisc_mask = 0;
4060 if (fi->flag == ICE_FLTR_TX)
/* Classify by DA type, then by Tx/Rx direction. */
4063 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4064 promisc_mask |= is_tx_fltr ?
4065 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4066 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4067 promisc_mask |= is_tx_fltr ?
4068 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4069 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4070 promisc_mask |= is_tx_fltr ?
4071 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4073 promisc_mask |= is_tx_fltr ?
4074 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4076 return promisc_mask;
4080 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4081 * @hw: pointer to the hardware structure
4082 * @vsi_handle: VSI handle to retrieve info from
4083 * @promisc_mask: pointer to mask to be filled in
4084 * @vid: VLAN ID of promisc VLAN VSI
4087 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4090 struct ice_switch_info *sw = hw->switch_info;
4091 struct ice_fltr_mgmt_list_entry *itr;
4092 struct LIST_HEAD_TYPE *rule_head;
4093 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4095 if (!ice_is_vsi_valid(hw, vsi_handle))
4096 return ICE_ERR_PARAM;
4100 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4101 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
/* Accumulate the promisc mask bits of every rule this VSI uses. */
4103 ice_acquire_lock(rule_lock);
4104 LIST_FOR_EACH_ENTRY(itr, rule_head,
4105 ice_fltr_mgmt_list_entry, list_entry) {
4106 /* Continue if this filter doesn't apply to this VSI or the
4107 * VSI ID is not in the VSI map for this filter
4109 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4112 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4114 ice_release_lock(rule_lock);
4120 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4121 * @hw: pointer to the hardware structure
4122 * @vsi_handle: VSI handle to retrieve info from
4123 * @promisc_mask: pointer to mask to be filled in
4124 * @vid: VLAN ID of promisc VLAN VSI
4127 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4130 struct ice_switch_info *sw = hw->switch_info;
4131 struct ice_fltr_mgmt_list_entry *itr;
4132 struct LIST_HEAD_TYPE *rule_head;
4133 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4135 if (!ice_is_vsi_valid(hw, vsi_handle))
4136 return ICE_ERR_PARAM;
/* Same walk as ice_get_vsi_promisc, but over the PROMISC_VLAN recipe. */
4140 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4141 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4143 ice_acquire_lock(rule_lock);
4144 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4146 /* Continue if this filter doesn't apply to this VSI or the
4147 * VSI ID is not in the VSI map for this filter
4149 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4152 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4154 ice_release_lock(rule_lock);
4160 * ice_remove_promisc - Remove promisc based filter rules
4161 * @hw: pointer to the hardware structure
4162 * @recp_id: recipe ID for which the rule needs to removed
4163 * @v_list: list of promisc entries
4165 static enum ice_status
4166 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4167 struct LIST_HEAD_TYPE *v_list)
4169 struct ice_fltr_list_entry *v_list_itr, *tmp;
4170 struct ice_sw_recipe *recp_list;
4172 recp_list = &hw->switch_info->recp_list[recp_id];
4173 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4175 v_list_itr->status =
4176 ice_remove_rule_internal(hw, recp_list, v_list_itr);
/* First failure aborts; caller frees the remaining entries. */
4177 if (v_list_itr->status)
4178 return v_list_itr->status;
4184 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4185 * @hw: pointer to the hardware structure
4186 * @vsi_handle: VSI handle to clear mode
4187 * @promisc_mask: mask of promiscuous config bits to clear
4188 * @vid: VLAN ID to clear VLAN promiscuous
4191 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4194 struct ice_switch_info *sw = hw->switch_info;
4195 struct ice_fltr_list_entry *fm_entry, *tmp;
4196 struct LIST_HEAD_TYPE remove_list_head;
4197 struct ice_fltr_mgmt_list_entry *itr;
4198 struct LIST_HEAD_TYPE *rule_head;
4199 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4200 enum ice_status status = ICE_SUCCESS;
4203 if (!ice_is_vsi_valid(hw, vsi_handle))
4204 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC. */
4206 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4207 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4209 recipe_id = ICE_SW_LKUP_PROMISC;
4211 rule_head = &sw->recp_list[recipe_id].filt_rules;
4212 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4214 INIT_LIST_HEAD(&remove_list_head);
/* Build a removal list of COPIES while holding the lock, then remove
 * outside the lock (ice_remove_promisc takes the lock itself). */
4216 ice_acquire_lock(rule_lock);
4217 LIST_FOR_EACH_ENTRY(itr, rule_head,
4218 ice_fltr_mgmt_list_entry, list_entry) {
4219 struct ice_fltr_info *fltr_info;
4220 u8 fltr_promisc_mask = 0;
4222 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4224 fltr_info = &itr->fltr_info;
4226 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4227 vid != fltr_info->l_data.mac_vlan.vlan_id)
4230 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4232 /* Skip if filter is not completely specified by given mask */
4233 if (fltr_promisc_mask & ~promisc_mask)
4236 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4240 ice_release_lock(rule_lock);
4241 goto free_fltr_list;
4244 ice_release_lock(rule_lock);
4246 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free the copied entries regardless of removal outcome. */
4249 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4250 ice_fltr_list_entry, list_entry) {
4251 LIST_DEL(&fm_entry->list_entry);
4252 ice_free(hw, fm_entry);
4259 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4260 * @hw: pointer to the hardware structure
4261 * @vsi_handle: VSI handle to configure
4262 * @promisc_mask: mask of promiscuous config bits
4263 * @vid: VLAN ID to set VLAN promiscuous
4266 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4268 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4269 struct ice_fltr_list_entry f_list_entry;
4270 struct ice_fltr_info new_fltr;
4271 enum ice_status status = ICE_SUCCESS;
4277 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4279 if (!ice_is_vsi_valid(hw, vsi_handle))
4280 return ICE_ERR_PARAM;
4281 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4283 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4285 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4286 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4287 new_fltr.l_data.mac_vlan.vlan_id = vid;
4288 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4290 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4291 recipe_id = ICE_SW_LKUP_PROMISC;
4294 /* Separate filters must be set for each direction/packet type
4295 * combination, so we will loop over the mask value, store the
4296 * individual type, and clear it out in the input mask as it
4299 while (promisc_mask) {
4300 struct ice_sw_recipe *recp_list;
/* Pick one (type, direction) bit per iteration and clear it from
 * the mask so the loop terminates once all bits are consumed. */
4306 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4307 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4308 pkt_type = UCAST_FLTR;
4309 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4310 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4311 pkt_type = UCAST_FLTR;
4313 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4314 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4315 pkt_type = MCAST_FLTR;
4316 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4317 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4318 pkt_type = MCAST_FLTR;
4320 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4321 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4322 pkt_type = BCAST_FLTR;
4323 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4324 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4325 pkt_type = BCAST_FLTR;
4329 /* Check for VLAN promiscuous flag */
4330 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4331 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4332 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4333 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4337 /* Set filter DA based on packet type */
4338 mac_addr = new_fltr.l_data.mac.mac_addr;
4339 if (pkt_type == BCAST_FLTR) {
4340 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4341 } else if (pkt_type == MCAST_FLTR ||
4342 pkt_type == UCAST_FLTR) {
4343 /* Use the dummy ether header DA */
4344 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4345 ICE_NONDMA_TO_NONDMA);
4346 if (pkt_type == MCAST_FLTR)
4347 mac_addr[0] |= 0x1; /* Set multicast bit */
4350 /* Need to reset this to zero for all iterations */
/* Tx promisc filters source from the VSI; Rx from the logical port. */
4353 new_fltr.flag |= ICE_FLTR_TX;
4354 new_fltr.src = hw_vsi_id;
4356 new_fltr.flag |= ICE_FLTR_RX;
4357 new_fltr.src = hw->port_info->lport;
4360 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4361 new_fltr.vsi_handle = vsi_handle;
4362 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4363 f_list_entry.fltr_info = new_fltr;
4364 recp_list = &hw->switch_info->recp_list[recipe_id];
4366 status = ice_add_rule_internal(hw, recp_list,
4367 hw->port_info->lport,
4369 if (status != ICE_SUCCESS)
4370 goto set_promisc_exit;
4378 * ice_set_vlan_vsi_promisc
4379 * @hw: pointer to the hardware structure
4380 * @vsi_handle: VSI handle to configure
4381 * @promisc_mask: mask of promiscuous config bits
4382 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4384 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4387 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4388 bool rm_vlan_promisc)
4390 struct ice_switch_info *sw = hw->switch_info;
4391 struct ice_fltr_list_entry *list_itr, *tmp;
4392 struct LIST_HEAD_TYPE vsi_list_head;
4393 struct LIST_HEAD_TYPE *vlan_head;
4394 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4395 enum ice_status status;
/* Snapshot this VSI's VLAN filters under the lock, then apply the
 * set/clear per VLAN without holding it. */
4398 INIT_LIST_HEAD(&vsi_list_head);
4399 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4400 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4401 ice_acquire_lock(vlan_lock);
4402 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4404 ice_release_lock(vlan_lock);
4406 goto free_fltr_list;
4408 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4410 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4411 if (rm_vlan_promisc)
4412 status = ice_clear_vsi_promisc(hw, vsi_handle,
4413 promisc_mask, vlan_id);
4415 status = ice_set_vsi_promisc(hw, vsi_handle,
4416 promisc_mask, vlan_id);
/* Free the snapshot copies on both success and failure paths. */
4422 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4423 ice_fltr_list_entry, list_entry) {
4424 LIST_DEL(&list_itr->list_entry);
4425 ice_free(hw, list_itr);
4431 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4432 * @hw: pointer to the hardware structure
4433 * @vsi_handle: VSI handle to remove filters from
4434 * @recp_list: recipe list from which function remove fltr
4435 * @lkup: switch rule filter lookup type
4438 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4439 struct ice_sw_recipe *recp_list,
4440 enum ice_sw_lkup_type lkup)
4442 struct ice_fltr_list_entry *fm_entry;
4443 struct LIST_HEAD_TYPE remove_list_head;
4444 struct LIST_HEAD_TYPE *rule_head;
4445 struct ice_fltr_list_entry *tmp;
4446 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4447 enum ice_status status;
4449 INIT_LIST_HEAD(&remove_list_head);
4450 rule_lock = &recp_list[lkup].filt_rule_lock;
4451 rule_head = &recp_list[lkup].filt_rules;
4452 ice_acquire_lock(rule_lock);
4453 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4455 ice_release_lock(rule_lock);
/* Dispatch to the lookup-type-specific remover; return codes of the
 * removers are not propagated (this function returns void upstream
 * of the copy-list cleanup below). */
4460 case ICE_SW_LKUP_MAC:
4461 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4463 case ICE_SW_LKUP_VLAN:
4464 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4466 case ICE_SW_LKUP_PROMISC:
4467 case ICE_SW_LKUP_PROMISC_VLAN:
4468 ice_remove_promisc(hw, lkup, &remove_list_head);
4470 case ICE_SW_LKUP_MAC_VLAN:
4471 ice_remove_mac_vlan(hw, &remove_list_head);
4473 case ICE_SW_LKUP_ETHERTYPE:
4474 case ICE_SW_LKUP_ETHERTYPE_MAC:
4475 ice_remove_eth_mac(hw, &remove_list_head);
4477 case ICE_SW_LKUP_DFLT:
4478 ice_debug(hw, ICE_DBG_SW,
4479 "Remove filters for this lookup type hasn't been implemented yet\n");
4481 case ICE_SW_LKUP_LAST:
4482 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the copied entries built by ice_add_to_vsi_fltr_list. */
4486 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4487 ice_fltr_list_entry, list_entry) {
4488 LIST_DEL(&fm_entry->list_entry);
4489 ice_free(hw, fm_entry);
4494 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4495 * @hw: pointer to the hardware structure
4496 * @vsi_handle: VSI handle to remove filters from
4497 * @sw: pointer to switch info struct
4500 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4501 struct ice_switch_info *sw)
4503 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Walk every supported lookup type and remove this VSI's filters for
 * each one in turn; each call snapshots and removes independently.
 */
4505 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4506 sw->recp_list, ICE_SW_LKUP_MAC);
4507 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4508 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4509 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4510 sw->recp_list, ICE_SW_LKUP_PROMISC);
4511 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4512 sw->recp_list, ICE_SW_LKUP_VLAN);
4513 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4514 sw->recp_list, ICE_SW_LKUP_DFLT);
4515 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4516 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4517 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4518 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4519 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4520 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4524 * ice_remove_vsi_fltr - Remove all filters for a VSI
4525 * @hw: pointer to the hardware structure
4526 * @vsi_handle: VSI handle to remove filters from
4528 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
/* Convenience wrapper: remove all of this VSI's filters using the
 * device's default switch_info bookkeeping.
 */
4530 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4534 * ice_alloc_res_cntr - allocating resource counter
4535 * @hw: pointer to the hardware structure
4536 * @type: type of resource
4537 * @alloc_shared: if set it is shared else dedicated
4538 * @num_items: number of entries requested for FD resource type
4539 * @counter_id: counter index returned by AQ call
4542 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4545 struct ice_aqc_alloc_free_res_elem *buf;
4546 enum ice_status status;
4549 /* Allocate resource */
4550 buf_len = sizeof(*buf);
4551 buf = (struct ice_aqc_alloc_free_res_elem *)
4552 ice_malloc(hw, buf_len);
4554 return ICE_ERR_NO_MEMORY;
/* Build a single-element alloc request; res_type encodes both the
 * resource type field and the shared/dedicated flag.
 */
4556 buf->num_elems = CPU_TO_LE16(num_items);
4557 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4558 ICE_AQC_RES_TYPE_M) | alloc_shared);
4560 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4561 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in sw_resp */
4565 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4573 * ice_free_res_cntr - free resource counter
4574 * @hw: pointer to the hardware structure
4575 * @type: type of resource
4576 * @alloc_shared: if set it is shared else dedicated
4577 * @num_items: number of entries to be freed for FD resource type
4578 * @counter_id: counter ID resource which needs to be freed
4581 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4584 struct ice_aqc_alloc_free_res_elem *buf;
4585 enum ice_status status;
4589 buf_len = sizeof(*buf);
4590 buf = (struct ice_aqc_alloc_free_res_elem *)
4591 ice_malloc(hw, buf_len);
4593 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr: same res_type encoding, but the
 * counter index to release is supplied in sw_resp and the free
 * opcode is used instead of alloc.
 */
4595 buf->num_elems = CPU_TO_LE16(num_items);
4596 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4597 ICE_AQC_RES_TYPE_M) | alloc_shared);
4598 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4600 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4601 ice_aqc_opc_free_res, NULL);
4603 ice_debug(hw, ICE_DBG_SW,
4604 "counter resource could not be freed\n");
4611 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4612 * @hw: pointer to the hardware structure
4613 * @counter_id: returns counter index
4615 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Allocate one dedicated (non-shared) VLAN counter resource */
4617 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4618 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4623 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4624 * @hw: pointer to the hardware structure
4625 * @counter_id: counter index to be freed
4627 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Release one dedicated VLAN counter previously obtained via
 * ice_alloc_vlan_res_counter()
 */
4629 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4630 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4635 * ice_alloc_res_lg_act - add large action resource
4636 * @hw: pointer to the hardware structure
4637 * @l_id: large action ID to fill it in
4638 * @num_acts: number of actions to hold with a large action entry
4640 static enum ice_status
4641 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4643 struct ice_aqc_alloc_free_res_elem *sw_buf;
4644 enum ice_status status;
/* Valid range is 1..ICE_MAX_LG_ACT actions */
4647 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4648 return ICE_ERR_PARAM;
4650 /* Allocate resource for large action */
4651 buf_len = sizeof(*sw_buf);
4652 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4653 ice_malloc(hw, buf_len);
4655 return ICE_ERR_NO_MEMORY;
4657 sw_buf->num_elems = CPU_TO_LE16(1);
4659 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4660 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4661 * If num_acts is greater than 2, then use
4662 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4663 * The num_acts cannot exceed 4. This was ensured at the
4664 * beginning of the function.
4667 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4668 else if (num_acts == 2)
4669 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4671 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4673 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4674 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated wide-table (large action) index */
4676 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4678 ice_free(hw, sw_buf);
4683 * ice_add_mac_with_sw_marker - add filter with sw marker
4684 * @hw: pointer to the hardware structure
4685 * @f_info: filter info structure containing the MAC filter information
4686 * @sw_marker: sw marker to tag the Rx descriptor with
4689 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4692 struct ice_fltr_mgmt_list_entry *m_entry;
4693 struct ice_fltr_list_entry fl_info;
4694 struct ice_sw_recipe *recp_list;
4695 struct LIST_HEAD_TYPE l_head;
4696 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4697 enum ice_status ret;
/* Only VSI-forwarding MAC filters can carry a software marker, and
 * both the marker and the target VSI handle must be valid.
 */
4701 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4702 return ICE_ERR_PARAM;
4704 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4705 return ICE_ERR_PARAM;
4707 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4708 return ICE_ERR_PARAM;
4710 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4711 return ICE_ERR_PARAM;
4712 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4714 /* Add filter if it doesn't exist so then the adding of large
4715 * action always results in update
4718 INIT_LIST_HEAD(&l_head);
4719 fl_info.fltr_info = *f_info;
4720 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the rule pre-existed so cleanup only removes
 * rules this call itself created.
 */
4722 entry_exists = false;
4723 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4724 hw->port_info->lport);
4725 if (ret == ICE_ERR_ALREADY_EXISTS)
4726 entry_exists = true;
4730 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4731 rule_lock = &recp_list->filt_rule_lock;
4732 ice_acquire_lock(rule_lock);
4733 /* Get the book keeping entry for the filter */
4734 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4738 /* If counter action was enabled for this rule then don't enable
4739 * sw marker large action
4741 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4742 ret = ICE_ERR_PARAM;
4746 /* if same marker was added before */
4747 if (m_entry->sw_marker_id == sw_marker) {
4748 ret = ICE_ERR_ALREADY_EXISTS;
4752 /* Allocate a hardware table entry to hold large act. Three actions
4753 * for marker based large action
4755 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4759 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4762 /* Update the switch rule to add the marker action */
4763 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4765 ice_release_lock(rule_lock);
4770 ice_release_lock(rule_lock);
4771 /* only remove entry if it did not exist previously */
4773 ret = ice_remove_mac(hw, &l_head);
4779 * ice_add_mac_with_counter - add filter with counter enabled
4780 * @hw: pointer to the hardware structure
4781 * @f_info: pointer to filter info structure containing the MAC filter
4785 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4787 struct ice_fltr_mgmt_list_entry *m_entry;
4788 struct ice_fltr_list_entry fl_info;
4789 struct ice_sw_recipe *recp_list;
4790 struct LIST_HEAD_TYPE l_head;
4791 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4792 enum ice_status ret;
/* Only VSI-forwarding MAC filters on a valid VSI may get a counter */
4797 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4798 return ICE_ERR_PARAM;
4800 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4801 return ICE_ERR_PARAM;
4803 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4804 return ICE_ERR_PARAM;
4805 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4806 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
/* Track whether the rule pre-existed so cleanup only removes rules
 * created by this call.
 */
4808 entry_exist = false;
4810 rule_lock = &recp_list->filt_rule_lock;
4812 /* Add filter if it doesn't exist so then the adding of large
4813 * action always results in update
4815 INIT_LIST_HEAD(&l_head);
4817 fl_info.fltr_info = *f_info;
4818 LIST_ADD(&fl_info.list_entry, &l_head);
4820 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4821 hw->port_info->lport);
4822 if (ret == ICE_ERR_ALREADY_EXISTS)
4827 ice_acquire_lock(rule_lock);
4828 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4830 ret = ICE_ERR_BAD_PTR;
4834 /* Don't enable counter for a filter for which sw marker was enabled */
4835 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4836 ret = ICE_ERR_PARAM;
4840 /* If a counter was already enabled then don't need to add again */
4841 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4842 ret = ICE_ERR_ALREADY_EXISTS;
4846 /* Allocate a hardware table entry to VLAN counter */
4847 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4851 /* Allocate a hardware table entry to hold large act. Two actions for
4852 * counter based large action
4854 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4858 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4861 /* Update the switch rule to add the counter action */
4862 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4864 ice_release_lock(rule_lock);
4869 ice_release_lock(rule_lock);
4870 /* only remove entry if it did not exist previously */
4872 ret = ice_remove_mac(hw, &l_head);
4877 /* This is mapping table entry that maps every word within a given protocol
4878 * structure to the real byte offset as per the specification of that
4880 * for example dst address is 3 words in ethernet header and corresponding
4881 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4882 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4883 * matching entry describing its field. This needs to be updated if new
4884 * structure is added to that union.
/* Each entry lists the byte offset of every 16-bit word within that
 * protocol's header; entries are keyed by ice_protocol_type.
 */
4886 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4887 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4888 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4889 { ICE_ETYPE_OL, { 0 } },
4890 { ICE_VLAN_OFOS, { 0, 2 } },
4891 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4892 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4893 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4894 26, 28, 30, 32, 34, 36, 38 } },
4895 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4896 26, 28, 30, 32, 34, 36, 38 } },
4897 { ICE_TCP_IL, { 0, 2 } },
4898 { ICE_UDP_OF, { 0, 2 } },
4899 { ICE_UDP_ILOS, { 0, 2 } },
4900 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers start matching at offset 8 — presumably skipping the
 * leading flag/reserved words; confirm against the protocol specs.
 */
4901 { ICE_VXLAN, { 8, 10, 12, 14 } },
4902 { ICE_GENEVE, { 8, 10, 12, 14 } },
4903 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4904 { ICE_NVGRE, { 0, 2, 4, 6 } },
4905 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4906 { ICE_PPPOE, { 0, 2, 4, 6 } },
4909 /* The following table describes preferred grouping of recipes.
4910 * If a recipe that needs to be programmed is a superset or matches one of the
4911 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors.  Note the UDP-based tunnels (VXLAN, GENEVE, VXLAN-GPE,
 * GTP) all share ICE_UDP_OF_HW.
 */
4915 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4916 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4917 { ICE_MAC_IL, ICE_MAC_IL_HW },
4918 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4919 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4920 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4921 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4922 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4923 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4924 { ICE_TCP_IL, ICE_TCP_IL_HW },
4925 { ICE_UDP_OF, ICE_UDP_OF_HW },
4926 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4927 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4928 { ICE_VXLAN, ICE_UDP_OF_HW },
4929 { ICE_GENEVE, ICE_UDP_OF_HW },
4930 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4931 { ICE_NVGRE, ICE_GRE_OF_HW },
4932 { ICE_GTP, ICE_UDP_OF_HW },
4933 { ICE_PPPOE, ICE_PPPOE_HW },
4937 * ice_find_recp - find a recipe
4938 * @hw: pointer to the hardware structure
4939 * @lkup_exts: extension sequence to match
4941 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4943 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4945 bool refresh_required = true;
4946 struct ice_sw_recipe *recp;
4949 /* Walk through existing recipes to find a match */
4950 recp = hw->switch_info->recp_list;
4951 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4952 /* If recipe was not created for this ID, in SW bookkeeping,
4953 * check if FW has an entry for this recipe. If the FW has an
4954 * entry update it in our SW bookkeeping and continue with the
4957 if (!recp[i].recp_created)
4958 if (ice_get_recp_frm_fw(hw,
4959 hw->switch_info->recp_list, i,
4963 /* Skip inverse action recipes */
4964 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4965 ICE_AQ_RECIPE_ACT_INV_ACT)
4968 /* if number of words we are looking for match */
4969 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4970 struct ice_fv_word *a = lkup_exts->fv_words;
4971 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: every requested (prot_id, off)
 * word must appear somewhere in the candidate recipe's words.
 */
4975 for (p = 0; p < lkup_exts->n_val_words; p++) {
4976 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4978 if (a[p].off == b[q].off &&
4979 a[p].prot_id == b[q].prot_id)
4980 /* Found the "p"th word in the
4985 /* After walking through all the words in the
4986 * "i"th recipe if "p"th word was not found then
4987 * this recipe is not what we are looking for.
4988 * So break out from this loop and try the next
4991 if (q >= recp[i].lkup_exts.n_val_words) {
4996 /* If for "i"th recipe the found was never set to false
4997 * then it means we found our match
5000 return i; /* Return the recipe ID */
/* No recipe matched the requested extraction sequence */
5003 return ICE_MAX_NUM_RECIPES;
5007 * ice_prot_type_to_id - get protocol ID from protocol type
5008 * @type: protocol type
5009 * @id: pointer to variable that will receive the ID
5011 * Returns true if found, false otherwise
5013 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the SW-type -> HW-protocol-ID mapping table */
5017 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5018 if (ice_prot_id_tbl[i].type == type) {
5019 *id = ice_prot_id_tbl[i].protocol_id;
5026 * ice_fill_valid_words - count valid words
5027 * @rule: advanced rule with lookup information
5028 * @lkup_exts: byte offset extractions of the words that are valid
5030 * calculate valid words in a lookup rule using mask value
5033 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5034 struct ice_prot_lkup_ext *lkup_exts)
/* Unknown protocol type: contribute no words */
5040 if (!ice_prot_type_to_id(rule->type, &prot_id))
5043 word = lkup_exts->n_val_words;
/* Scan the rule's mask as an array of 16-bit words; every word with a
 * non-zero mask becomes one (prot_id, offset, mask) extraction entry.
 */
5045 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5046 if (((u16 *)&rule->m_u)[j] &&
5047 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
5048 /* No more space to accommodate */
5049 if (word >= ICE_MAX_CHAIN_WORDS)
5051 lkup_exts->fv_words[word].off =
5052 ice_prot_ext[rule->type].offs[j];
5053 lkup_exts->fv_words[word].prot_id =
5054 ice_prot_id_tbl[rule->type].protocol_id;
5055 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return how many new words this rule added */
5059 ret_val = word - lkup_exts->n_val_words;
5060 lkup_exts->n_val_words = word;
5066 * ice_create_first_fit_recp_def - Create a recipe grouping
5067 * @hw: pointer to the hardware structure
5068 * @lkup_exts: an array of protocol header extractions
5069 * @rg_list: pointer to a list that stores new recipe groups
5070 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5072 * Using first fit algorithm, take all the words that are still not done
5073 * and start grouping them in 4-word groups. Each group makes up one
5076 static enum ice_status
5077 ice_create_first_fit_recp_def(struct ice_hw *hw,
5078 struct ice_prot_lkup_ext *lkup_exts,
5079 struct LIST_HEAD_TYPE *rg_list,
5082 struct ice_pref_recipe_group *grp = NULL;
5087 /* Walk through every word in the rule to check if it is not done. If so
5088 * then this word needs to be part of a new recipe.
5090 for (j = 0; j < lkup_exts->n_val_words; j++)
5091 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Current group is full (ICE_NUM_WORDS_RECIPE pairs) or absent:
 * start a new recipe group entry on rg_list.
 */
5093 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5094 struct ice_recp_grp_entry *entry;
5096 entry = (struct ice_recp_grp_entry *)
5097 ice_malloc(hw, sizeof(*entry));
5099 return ICE_ERR_NO_MEMORY;
5100 LIST_ADD(&entry->l_entry, rg_list);
5101 grp = &entry->r_group;
/* Append this word's protocol/offset pair and mask to the group */
5105 grp->pairs[grp->n_val_pairs].prot_id =
5106 lkup_exts->fv_words[j].prot_id;
5107 grp->pairs[grp->n_val_pairs].off =
5108 lkup_exts->fv_words[j].off;
5109 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5117 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5118 * @hw: pointer to the hardware structure
5119 * @fv_list: field vector with the extraction sequence information
5120 * @rg_list: recipe groupings with protocol-offset pairs
5122 * Helper function to fill in the field vector indices for protocol-offset
5123 * pairs. These indexes are then ultimately programmed into a recipe.
5125 static enum ice_status
5126 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5127 struct LIST_HEAD_TYPE *rg_list)
5129 struct ice_sw_fv_list_entry *fv;
5130 struct ice_recp_grp_entry *rg;
5131 struct ice_fv_word *fv_ext;
/* Nothing to index against if no field vectors were provided */
5133 if (LIST_EMPTY(fv_list))
/* Use the first field vector's extraction words as the reference.
 * NOTE(review): only fv_list's head is consulted here — presumably
 * all listed FVs share the same layout; confirm with callers.
 */
5136 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5137 fv_ext = fv->fv_ptr->ew;
5139 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5142 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5143 struct ice_fv_word *pr;
5148 pr = &rg->r_group.pairs[i];
5149 mask = rg->r_group.mask[i];
5151 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5152 if (fv_ext[j].prot_id == pr->prot_id &&
5153 fv_ext[j].off == pr->off) {
5156 /* Store index of field vector */
5158 /* Mask is given by caller as big
5159 * endian, but sent to FW as little
5162 rg->fv_mask[i] = mask << 8 | mask >> 8;
5166 /* Protocol/offset could not be found, caller gave an
5170 return ICE_ERR_PARAM;
5178 * ice_find_free_recp_res_idx - find free result indexes for recipe
5179 * @hw: pointer to hardware structure
5180 * @profiles: bitmap of profiles that will be associated with the new recipe
5181 * @free_idx: pointer to variable to receive the free index bitmap
5183 * The algorithm used here is:
5184 * 1. When creating a new recipe, create a set P which contains all
5185 * Profiles that will be associated with our new recipe
5187 * 2. For each Profile p in set P:
5188 * a. Add all recipes associated with Profile p into set R
5189 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5190 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5191 * i. Or just assume they all have the same possible indexes:
5193 * i.e., PossibleIndexes = 0x0000F00000000000
5195 * 3. For each Recipe r in set R:
5196 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5197 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5199 * FreeIndexes will contain the bits indicating the indexes free for use,
5200 * then the code needs to update the recipe[r].used_result_idx_bits to
5201 * indicate which indexes were selected for use by this recipe.
5204 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5205 ice_bitmap_t *free_idx)
5207 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5208 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5209 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5213 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5214 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5215 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5216 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible"; profile constraints AND this down */
5218 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5219 ice_set_bit(count, possible_idx);
5221 /* For each profile we are going to associate the recipe with, add the
5222 * recipes that are associated with that profile. This will give us
5223 * the set of recipes that our recipe may collide with. Also, determine
5224 * what possible result indexes are usable given this set of profiles.
5227 while (ICE_MAX_NUM_PROFILES >
5228 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5229 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5230 ICE_MAX_NUM_RECIPES);
5231 ice_and_bitmap(possible_idx, possible_idx,
5232 hw->switch_info->prof_res_bm[bit],
5237 /* For each recipe that our new recipe may collide with, determine
5238 * which indexes have been used.
5240 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5241 if (ice_is_bit_set(recipes, bit)) {
5242 ice_or_bitmap(used_idx, used_idx,
5243 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
5247 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5249 /* return number of free indexes */
5252 while (ICE_MAX_FV_WORDS >
5253 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5262 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5263 * @hw: pointer to hardware structure
5264 * @rm: recipe management list entry
5265 * @match_tun: if field vector index for tunnel needs to be programmed
5266 * @profiles: bitmap of profiles that will be associated.
5268 static enum ice_status
5269 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5270 bool match_tun, ice_bitmap_t *profiles)
5272 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5273 struct ice_aqc_recipe_data_elem *tmp;
5274 struct ice_aqc_recipe_data_elem *buf;
5275 struct ice_recp_grp_entry *entry;
5276 enum ice_status status;
5282 /* When more than one recipe are required, another recipe is needed to
5283 * chain them together. Matching a tunnel metadata ID takes up one of
5284 * the match fields in the chaining recipe reducing the number of
5285 * chained recipes by one.
5287 /* check number of free result indices */
5288 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5289 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm)
5291 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5292 free_res_idx, rm->n_grp_count);
5294 if (rm->n_grp_count > 1) {
5295 if (rm->n_grp_count > free_res_idx)
5296 return ICE_ERR_MAX_LIMIT;
5301 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5302 ICE_MAX_NUM_RECIPES,
5305 return ICE_ERR_NO_MEMORY;
5307 buf = (struct ice_aqc_recipe_data_elem *)
5308 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5310 status = ICE_ERR_NO_MEMORY;
/* Read an existing recipe from FW into tmp to use as a template for
 * the entries built below.
 */
5314 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5315 recipe_count = ICE_MAX_NUM_RECIPES;
5316 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5318 if (status || recipe_count == 0)
5321 /* Allocate the recipe resources, and configure them according to the
5322 * match fields from protocol headers and extracted field vectors.
5324 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5325 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5328 status = ice_alloc_recipe(hw, &entry->rid);
5332 /* Clear the result index of the located recipe, as this will be
5333 * updated, if needed, later in the recipe creation process.
5335 tmp[0].content.result_indx = 0;
5337 buf[recps] = tmp[0];
5338 buf[recps].recipe_indx = (u8)entry->rid;
5339 /* if the recipe is a non-root recipe RID should be programmed
5340 * as 0 for the rules to be applied correctly.
5342 buf[recps].content.rid = 0;
5343 ice_memset(&buf[recps].content.lkup_indx, 0,
5344 sizeof(buf[recps].content.lkup_indx),
5347 /* All recipes use look-up index 0 to match switch ID. */
5348 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5349 buf[recps].content.mask[0] =
5350 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5351 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5354 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5355 buf[recps].content.lkup_indx[i] = 0x80;
5356 buf[recps].content.mask[i] = 0;
/* Program the group's field vector indexes and masks into
 * lookup slots 1..n (slot 0 is the switch ID above).
 */
5359 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5360 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5361 buf[recps].content.mask[i + 1] =
5362 CPU_TO_LE16(entry->fv_mask[i]);
5365 if (rm->n_grp_count > 1) {
5366 /* Checks to see if there really is a valid result index
5369 if (chain_idx >= ICE_MAX_FV_WORDS) {
5370 ice_debug(hw, ICE_DBG_SW,
5371 "No chain index available\n");
5372 status = ICE_ERR_MAX_LIMIT;
/* Reserve this result index for chaining and advance to the
 * next free one for the following group.
 */
5376 entry->chain_idx = chain_idx;
5377 buf[recps].content.result_indx =
5378 ICE_AQ_RECIPE_RESULT_EN |
5379 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5380 ICE_AQ_RECIPE_RESULT_DATA_M);
5381 ice_clear_bit(chain_idx, result_idx_bm);
5382 chain_idx = ice_find_first_bit(result_idx_bm,
5386 /* fill recipe dependencies */
5387 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5388 ICE_MAX_NUM_RECIPES);
5389 ice_set_bit(buf[recps].recipe_indx,
5390 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5391 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5395 if (rm->n_grp_count == 1) {
5396 rm->root_rid = buf[0].recipe_indx;
5397 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5398 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5399 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5400 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5401 sizeof(buf[0].recipe_bitmap),
5402 ICE_NONDMA_TO_NONDMA);
5404 status = ICE_ERR_BAD_PTR;
5407 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5408 * the recipe which is getting created if specified
5409 * by user. Usually any advanced switch filter, which results
5410 * into new extraction sequence, ended up creating a new recipe
5411 * of type ROOT and usually recipes are associated with profiles
5412 * Switch rule referring to the newly created recipe needs to have
5413 * either/or 'fwd' or 'join' priority, otherwise switch rule
5414 * evaluation will not happen correctly. In other words, if
5415 * switch rule to be evaluated on priority basis, then recipe
5416 * needs to have priority, otherwise it will be evaluated last.
5418 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5420 struct ice_recp_grp_entry *last_chain_entry;
5423 /* Allocate the last recipe that will chain the outcomes of the
5424 * other recipes together
5426 status = ice_alloc_recipe(hw, &rid);
5430 buf[recps].recipe_indx = (u8)rid;
5431 buf[recps].content.rid = (u8)rid;
5432 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5433 /* the new entry created should also be part of rg_list to
5434 * make sure we have complete recipe
5436 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5437 sizeof(*last_chain_entry));
5438 if (!last_chain_entry) {
5439 status = ICE_ERR_NO_MEMORY;
5442 last_chain_entry->rid = rid;
5443 ice_memset(&buf[recps].content.lkup_indx, 0,
5444 sizeof(buf[recps].content.lkup_indx),
5446 /* All recipes use look-up index 0 to match switch ID. */
5447 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5448 buf[recps].content.mask[0] =
5449 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5450 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5451 buf[recps].content.lkup_indx[i] =
5452 ICE_AQ_RECIPE_LKUP_IGNORE;
5453 buf[recps].content.mask[i] = 0;
5457 /* update r_bitmap with the recp that is used for chaining */
5458 ice_set_bit(rid, rm->r_bitmap);
5459 /* this is the recipe that chains all the other recipes so it
5460 * should not have a chaining ID to indicate the same
5462 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Each chained recipe's result (chain_idx) becomes one lookup
 * word of this root recipe, matched with a full 0xFFFF mask.
 */
5463 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5465 last_chain_entry->fv_idx[i] = entry->chain_idx;
5466 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5467 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5468 ice_set_bit(entry->rid, rm->r_bitmap);
5470 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5471 if (sizeof(buf[recps].recipe_bitmap) >=
5472 sizeof(rm->r_bitmap)) {
5473 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5474 sizeof(buf[recps].recipe_bitmap),
5475 ICE_NONDMA_TO_NONDMA);
5477 status = ICE_ERR_BAD_PTR;
5480 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5482 /* To differentiate among different UDP tunnels, a meta data ID
5486 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5487 buf[recps].content.mask[i] =
5488 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5492 rm->root_rid = (u8)rid;
/* Program all built recipe entries into FW under the change lock */
5494 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5498 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5499 ice_release_change_lock(hw);
5503 /* Every recipe that just got created add it to the recipe
5506 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5507 struct ice_switch_info *sw = hw->switch_info;
5508 bool is_root, idx_found = false;
5509 struct ice_sw_recipe *recp;
5510 u16 idx, buf_idx = 0;
5512 /* find buffer index for copying some data */
5513 for (idx = 0; idx < rm->n_grp_count; idx++)
5514 if (buf[idx].recipe_indx == entry->rid) {
5520 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5524 recp = &sw->recp_list[entry->rid];
5525 is_root = (rm->root_rid == entry->rid);
5526 recp->is_root = is_root;
5528 recp->root_rid = entry->rid;
5529 recp->big_recp = (is_root && rm->n_grp_count > 1);
5531 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5532 entry->r_group.n_val_pairs *
5533 sizeof(struct ice_fv_word),
5534 ICE_NONDMA_TO_NONDMA);
5536 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5537 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5539 /* Copy non-result fv index values and masks to recipe. This
5540 * call will also update the result recipe bitmask.
5542 ice_collect_result_idx(&buf[buf_idx], recp);
5544 /* for non-root recipes, also copy to the root, this allows
5545 * easier matching of a complete chained recipe
5548 ice_collect_result_idx(&buf[buf_idx],
5549 &sw->recp_list[rm->root_rid]);
5551 recp->n_ext_words = entry->r_group.n_val_pairs;
5552 recp->chain_idx = entry->chain_idx;
5553 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5554 recp->n_grp_count = rm->n_grp_count;
5555 recp->tun_type = rm->tun_type;
5556 recp->recp_created = true;
5571 * ice_create_recipe_group - creates recipe group
5572 * @hw: pointer to hardware structure
5573 * @rm: recipe management list entry
5574 * @lkup_exts: lookup elements
5576 static enum ice_status
5577 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5578 struct ice_prot_lkup_ext *lkup_exts)
5580 enum ice_status status;
5583 rm->n_grp_count = 0;
5585 /* Create recipes for words that are marked not done by packing them
5588 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5589 &rm->rg_list, &recp_count);
/* Record the grouping result and copy the full extraction sequence
 * (words and masks) into the recipe-management entry.
 */
5591 rm->n_grp_count += recp_count;
5592 rm->n_ext_words = lkup_exts->n_val_words;
5593 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5594 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5595 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5596 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* NOTE(review): excerpt has elided lines (e.g. the NULL check after
 * ice_calloc and loop-exit handling); comments only added here.
 */
5603 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5604 * @hw: pointer to hardware structure
5605 * @lkups: lookup elements or match criteria for the advanced recipe, one
5606 * structure per protocol header
5607 * @lkups_cnt: number of protocols
5608 * @bm: bitmap of field vectors to consider
5609 * @fv_list: pointer to a list that holds the returned field vectors
5611 static enum ice_status
5612 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5613 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5615 enum ice_status status;
/* scratch array of HW protocol IDs, one per lookup element */
5619 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5621 return ICE_ERR_NO_MEMORY;
/* translate each software lookup type to its HW protocol ID; an
 * untranslatable type makes the whole request invalid (ICE_ERR_CFG)
 */
5623 for (i = 0; i < lkups_cnt; i++)
5624 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5625 status = ICE_ERR_CFG;
5629 /* Find field vectors that include all specified protocol types */
5630 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* scratch array is no longer needed regardless of status */
5633 ice_free(hw, prot_ids);
5638 * ice_add_special_words - Add words that are not protocols, such as metadata
5639 * @rinfo: other information regarding the rule e.g. priority and action info
5640 * @lkup_exts: lookup word structure
5642 static enum ice_status
5643 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5644 struct ice_prot_lkup_ext *lkup_exts)
5646 /* If this is a tunneled packet, then add recipe index to match the
5647 * tunnel bit in the packet metadata flags.
5649 if (rinfo->tun_type != ICE_NON_TUN) {
/* append one extra lookup word only if the chain still has room */
5650 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5651 u8 word = lkup_exts->n_val_words++;
/* the new word matches the tunnel-flag metadata (MDID) rather than a
 * packet header field
 */
5653 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5654 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5656 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* no room left for the metadata word — cannot express this rule */
5658 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): break statements between cases are elided in this excerpt;
 * presumably each case ends with a break — confirm against full source.
 */
5665 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5666 * @hw: pointer to hardware structure
5667 * @rinfo: other information regarding the rule e.g. priority and action info
5668 * @bm: pointer to memory for returning the bitmap of field vectors
5671 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5674 enum ice_prof_type prof_type;
/* map the rule's tunnel type onto a profile class, which then selects
 * the set of field vectors eligible for this rule
 */
5676 switch (rinfo->tun_type) {
5678 prof_type = ICE_PROF_NON_TUN;
5680 case ICE_ALL_TUNNELS:
5681 prof_type = ICE_PROF_TUN_ALL;
5683 case ICE_SW_TUN_VXLAN_GPE:
5684 case ICE_SW_TUN_GENEVE:
5685 case ICE_SW_TUN_VXLAN:
5686 case ICE_SW_TUN_UDP:
5687 case ICE_SW_TUN_GTP:
5688 prof_type = ICE_PROF_TUN_UDP;
5690 case ICE_SW_TUN_NVGRE:
5691 prof_type = ICE_PROF_TUN_GRE;
5693 case ICE_SW_TUN_PPPOE:
5694 prof_type = ICE_PROF_TUN_PPPOE;
5696 case ICE_SW_TUN_AND_NON_TUN:
5698 prof_type = ICE_PROF_ALL;
/* fill caller's bitmap with the profiles of the chosen class */
5702 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): this function has many elided lines in the excerpt (status
 * checks, goto labels, closing braces); comments below describe only the
 * visible flow and hedge where code is missing.
 */
5706 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5707 * @hw: pointer to hardware structure
5708 * @lkups: lookup elements or match criteria for the advanced recipe, one
5709 * structure per protocol header
5710 * @lkups_cnt: number of protocols
5711 * @rinfo: other information regarding the rule e.g. priority and action info
5712 * @rid: return the recipe ID of the recipe created
5714 static enum ice_status
5715 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5716 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5718 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5719 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5720 struct ice_prot_lkup_ext *lkup_exts;
5721 struct ice_recp_grp_entry *r_entry;
5722 struct ice_sw_fv_list_entry *fvit;
5723 struct ice_recp_grp_entry *r_tmp;
5724 struct ice_sw_fv_list_entry *tmp;
5725 enum ice_status status = ICE_SUCCESS;
5726 struct ice_sw_recipe *rm;
5727 bool match_tun = false;
5731 return ICE_ERR_PARAM;
/* working copy of the lookup/extraction words for this request */
5733 lkup_exts = (struct ice_prot_lkup_ext *)
5734 ice_malloc(hw, sizeof(*lkup_exts));
5736 return ICE_ERR_NO_MEMORY;
5738 /* Determine the number of words to be matched and if it exceeds a
5739 * recipe's restrictions
5741 for (i = 0; i < lkups_cnt; i++) {
5744 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5745 status = ICE_ERR_CFG;
5746 goto err_free_lkup_exts;
/* collect the valid (masked) words of this lookup element */
5749 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5751 status = ICE_ERR_CFG;
5752 goto err_free_lkup_exts;
/* recipe-management entry that tracks everything built below */
5756 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5758 status = ICE_ERR_NO_MEMORY;
5759 goto err_free_lkup_exts;
5762 /* Get field vectors that contain fields extracted from all the protocol
5763 * headers being programmed.
5765 INIT_LIST_HEAD(&rm->fv_list);
5766 INIT_LIST_HEAD(&rm->rg_list);
5768 /* Get bitmap of field vectors (profiles) that are compatible with the
5769 * rule request; only these will be searched in the subsequent call to
5772 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5774 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5778 /* Group match words into recipes using preferred recipe grouping
5781 status = ice_create_recipe_group(hw, rm, lkup_exts);
5785 /* There is only profile for UDP tunnels. So, it is necessary to use a
5786 * metadata ID flag to differentiate different tunnel types. A separate
5787 * recipe needs to be used for the metadata.
5789 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5790 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5791 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5794 /* set the recipe priority if specified */
5795 rm->priority = rinfo->priority ? rinfo->priority : 0;
5797 /* Find offsets from the field vector. Pick the first one for all the
5800 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5804 /* get bitmap of all profiles the recipe will be associated with */
5805 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5806 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5808 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5809 ice_set_bit((u16)fvit->profile_id, profiles);
5812 /* Create any special protocol/offset pairs, such as looking at tunnel
5813 * bits by extracting metadata
5815 status = ice_add_special_words(rinfo, lkup_exts);
5817 goto err_free_lkup_exts;
5819 /* Look for a recipe which matches our requested fv / mask list */
5820 *rid = ice_find_recp(hw, lkup_exts);
5821 if (*rid < ICE_MAX_NUM_RECIPES)
5822 /* Success if found a recipe that match the existing criteria */
5825 /* Recipe we need does not exist, add a recipe */
5826 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5830 /* Associate all the recipes created with all the profiles in the
5831 * common field vector.
5833 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5835 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* merge the new recipe bits into the profile's existing mapping
 * (read-modify-write via AQ, under the change lock)
 */
5838 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5839 (u8 *)r_bitmap, NULL);
5843 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5844 ICE_MAX_NUM_RECIPES);
5845 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5849 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5852 ice_release_change_lock(hw);
5857 /* Update profile to recipe bitmap array */
5858 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5859 ICE_MAX_NUM_RECIPES);
5861 /* Update recipe to profile bitmap array */
5862 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5863 if (ice_is_bit_set(r_bitmap, j))
5864 ice_set_bit((u16)fvit->profile_id,
5865 recipe_to_profile[j]);
/* publish the created recipe's root ID and cache its lookup words in
 * the software recipe list
 */
5868 *rid = rm->root_rid;
5869 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5870 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* teardown of temporary group and FV lists (cleanup path) */
5872 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5873 ice_recp_grp_entry, l_entry) {
5874 LIST_DEL(&r_entry->l_entry);
5875 ice_free(hw, r_entry);
5878 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5880 LIST_DEL(&fvit->list_entry);
5885 ice_free(hw, rm->root_buf);
5890 ice_free(hw, lkup_exts);
/* NOTE(review): returns and some else branches are elided in this excerpt;
 * each `*pkt/*pkt_len/*offsets` triple selects one template packet.
 */
5896 * ice_find_dummy_packet - find dummy packet by tunnel type
5898 * @lkups: lookup elements or match criteria for the advanced recipe, one
5899 * structure per protocol header
5900 * @lkups_cnt: number of protocols
5901 * @tun_type: tunnel type from the match criteria
5902 * @pkt: dummy packet to fill according to filter match criteria
5903 * @pkt_len: packet length of dummy packet
5904 * @offsets: pointer to receive the pointer to the offsets for the packet
5907 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5908 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5910 const struct ice_dummy_pkt_offsets **offsets)
5912 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* tunnel types with a dedicated template are handled first */
5915 if (tun_type == ICE_SW_TUN_GTP) {
5916 *pkt = dummy_udp_gtp_packet;
5917 *pkt_len = sizeof(dummy_udp_gtp_packet);
5918 *offsets = dummy_udp_gtp_packet_offsets;
5921 if (tun_type == ICE_SW_TUN_PPPOE) {
5922 *pkt = dummy_pppoe_packet;
5923 *pkt_len = sizeof(dummy_pppoe_packet);
5924 *offsets = dummy_pppoe_packet_offsets;
/* scan the lookups to learn which L3/L4/VLAN flavor the rule matches */
5927 for (i = 0; i < lkups_cnt; i++) {
5928 if (lkups[i].type == ICE_UDP_ILOS)
5930 else if (lkups[i].type == ICE_TCP_IL)
5932 else if (lkups[i].type == ICE_IPV6_OFOS)
5934 else if (lkups[i].type == ICE_VLAN_OFOS)
5938 if (tun_type == ICE_ALL_TUNNELS) {
5939 *pkt = dummy_gre_udp_packet;
5940 *pkt_len = sizeof(dummy_gre_udp_packet);
5941 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: TCP vs UDP inner template (selector elided in excerpt) */
5945 if (tun_type == ICE_SW_TUN_NVGRE) {
5947 *pkt = dummy_gre_tcp_packet;
5948 *pkt_len = sizeof(dummy_gre_tcp_packet);
5949 *offsets = dummy_gre_tcp_packet_offsets;
5953 *pkt = dummy_gre_udp_packet;
5954 *pkt_len = sizeof(dummy_gre_udp_packet);
5955 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-based tunnels (VXLAN/GENEVE/GPE/plain UDP tunnel) */
5959 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5960 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5962 *pkt = dummy_udp_tun_tcp_packet;
5963 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5964 *offsets = dummy_udp_tun_tcp_packet_offsets;
5968 *pkt = dummy_udp_tun_udp_packet;
5969 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5970 *offsets = dummy_udp_tun_udp_packet_offsets;
/* non-tunneled selection below: combinations of udp/ipv6/vlan flags
 * pick the matching plain template (vlan variants first)
 */
5976 *pkt = dummy_vlan_udp_packet;
5977 *pkt_len = sizeof(dummy_vlan_udp_packet);
5978 *offsets = dummy_vlan_udp_packet_offsets;
5981 *pkt = dummy_udp_packet;
5982 *pkt_len = sizeof(dummy_udp_packet);
5983 *offsets = dummy_udp_packet_offsets;
5985 } else if (udp && ipv6) {
5987 *pkt = dummy_vlan_udp_ipv6_packet;
5988 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5989 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5992 *pkt = dummy_udp_ipv6_packet;
5993 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5994 *offsets = dummy_udp_ipv6_packet_offsets;
5996 } else if ((tcp && ipv6) || ipv6) {
5998 *pkt = dummy_vlan_tcp_ipv6_packet;
5999 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6000 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6003 *pkt = dummy_tcp_ipv6_packet;
6004 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6005 *offsets = dummy_tcp_ipv6_packet_offsets;
/* default (IPv4 TCP) templates */
6010 *pkt = dummy_vlan_tcp_packet;
6011 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6012 *offsets = dummy_vlan_tcp_packet_offsets;
6014 *pkt = dummy_tcp_packet;
6015 *pkt_len = sizeof(dummy_tcp_packet);
6016 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): case labels of the switch are partially elided in this
 * excerpt; each visible `len = sizeof(...)` belongs to a protocol case.
 */
6021 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6023 * @lkups: lookup elements or match criteria for the advanced recipe, one
6024 * structure per protocol header
6025 * @lkups_cnt: number of protocols
6026 * @s_rule: stores rule information from the match criteria
6027 * @dummy_pkt: dummy packet to fill according to filter match criteria
6028 * @pkt_len: packet length of dummy packet
6029 * @offsets: offset info for the dummy packet
6031 static enum ice_status
6032 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6033 struct ice_aqc_sw_rules_elem *s_rule,
6034 const u8 *dummy_pkt, u16 pkt_len,
6035 const struct ice_dummy_pkt_offsets *offsets)
6040 /* Start with a packet with a pre-defined/dummy content. Then, fill
6041 * in the header values to be looked up or matched.
6043 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6045 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6047 for (i = 0; i < lkups_cnt; i++) {
6048 enum ice_protocol_type type;
6049 u16 offset = 0, len = 0, j;
6052 /* find the start of this layer; it should be found since this
6053 * was already checked when search for the dummy packet
6055 type = lkups[i].type;
6056 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6057 if (type == offsets[j].type) {
6058 offset = offsets[j].offset;
6063 /* this should never happen in a correct calling sequence */
6065 return ICE_ERR_PARAM;
/* pick the header length appropriate for this protocol type so we
 * only touch that layer's bytes in the dummy packet
 */
6067 switch (lkups[i].type) {
6070 len = sizeof(struct ice_ether_hdr);
6073 len = sizeof(struct ice_ethtype_hdr);
6076 len = sizeof(struct ice_vlan_hdr);
6080 len = sizeof(struct ice_ipv4_hdr);
6084 len = sizeof(struct ice_ipv6_hdr);
6089 len = sizeof(struct ice_l4_hdr);
6092 len = sizeof(struct ice_sctp_hdr);
6095 len = sizeof(struct ice_nvgre);
6100 len = sizeof(struct ice_udp_tnl_hdr);
6104 len = sizeof(struct ice_udp_gtp_hdr);
6107 len = sizeof(struct ice_pppoe_hdr);
6110 return ICE_ERR_PARAM;
6113 /* the length should be a word multiple */
6114 if (len % ICE_BYTES_PER_WORD)
6117 /* We have the offset to the header start, the length, the
6118 * caller's header values and mask. Use this information to
6119 * copy the data into the dummy packet appropriately based on
6120 * the mask. Note that we need to only write the bits as
6121 * indicated by the mask to make sure we don't improperly write
6122 * over any significant packet data.
6124 for (j = 0; j < len / sizeof(u16); j++)
6125 if (((u16 *)&lkups[i].m_u)[j])
6126 ((u16 *)(pkt + offset))[j] =
6127 (((u16 *)(pkt + offset))[j] &
6128 ~((u16 *)&lkups[i].m_u)[j]) |
6129 (((u16 *)&lkups[i].h_u)[j] &
6130 ((u16 *)&lkups[i].m_u)[j]);
/* record the final packet length in the switch-rule element */
6133 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6139 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6140 * @hw: pointer to the hardware structure
6141 * @tun_type: tunnel type
6142 * @pkt: dummy packet to fill in
6143 * @offsets: offset info for the dummy packet
6145 static enum ice_status
6146 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6147 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* look up the currently-open tunnel UDP port for the given type;
 * error handling for a closed port is elided in this excerpt
 */
6152 case ICE_SW_TUN_AND_NON_TUN:
6153 case ICE_SW_TUN_VXLAN_GPE:
6154 case ICE_SW_TUN_VXLAN:
6155 case ICE_SW_TUN_UDP:
6156 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6160 case ICE_SW_TUN_GENEVE:
6161 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6166 /* Nothing needs to be done for this tunnel type */
6170 /* Find the outer UDP protocol header and insert the port number */
6171 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6172 if (offsets[i].type == ICE_UDP_OF) {
6173 struct ice_l4_hdr *hdr;
6176 offset = offsets[i].offset;
6177 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* destination port in the packet is big-endian */
6178 hdr->dst_port = CPU_TO_BE16(open_port);
6188 * ice_find_adv_rule_entry - Search a rule entry
6189 * @hw: pointer to the hardware structure
6190 * @lkups: lookup elements or match criteria for the advanced recipe, one
6191 * structure per protocol header
6192 * @lkups_cnt: number of protocols
6193 * @recp_id: recipe ID for which we are finding the rule
6194 * @rinfo: other information regarding the rule e.g. priority and action info
6196 * Helper function to search for a given advance rule entry
6197 * Returns pointer to entry storing the rule if found
6199 static struct ice_adv_fltr_mgmt_list_entry *
6200 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6201 u16 lkups_cnt, u8 recp_id,
6202 struct ice_adv_rule_info *rinfo)
6204 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6205 struct ice_switch_info *sw = hw->switch_info;
/* linear scan of the recipe's filter-rule list; a candidate matches
 * only if every lookup element compares equal byte-for-byte
 */
6208 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6209 ice_adv_fltr_mgmt_list_entry, list_entry) {
6210 bool lkups_matched = true;
6212 if (lkups_cnt != list_itr->lkups_cnt)
6214 for (i = 0; i < list_itr->lkups_cnt; i++)
6215 if (memcmp(&list_itr->lkups[i], &lkups[i],
6217 lkups_matched = false;
/* the rule's direction flag and tunnel type must also agree */
6220 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6221 rinfo->tun_type == list_itr->rule_info.tun_type &&
6229 * ice_adv_add_update_vsi_list
6230 * @hw: pointer to the hardware structure
6231 * @m_entry: pointer to current adv filter management list entry
6232 * @cur_fltr: filter information from the book keeping entry
6233 * @new_fltr: filter information with the new VSI to be added
6235 * Call AQ command to add or update previously created VSI list with new VSI.
6237 * Helper function to do book keeping associated with adding filter information
6238 * The algorithm to do the booking keeping is described below :
6239 * When a VSI needs to subscribe to a given advanced filter
6240 * if only one VSI has been added till now
6241 * Allocate a new VSI list and add two VSIs
6242 * to this list using switch rule command
6243 * Update the previously created switch rule with the
6244 * newly created VSI list ID
6245 * if a VSI list was previously created
6246 * Add the new VSI to the previously created VSI list set
6247 * using the update switch rule command
6249 static enum ice_status
6250 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6251 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6252 struct ice_adv_rule_info *cur_fltr,
6253 struct ice_adv_rule_info *new_fltr)
6255 enum ice_status status;
6256 u16 vsi_list_id = 0;
/* only VSI-forwarding rules can be aggregated into a VSI list */
6258 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6259 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6260 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6261 return ICE_ERR_NOT_IMPL;
6263 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6264 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6265 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6266 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6267 return ICE_ERR_NOT_IMPL;
6269 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6270 /* Only one entry existed in the mapping and it was not already
6271 * a part of a VSI list. So, create a VSI list with the old and
6274 struct ice_fltr_info tmp_fltr;
6275 u16 vsi_handle_arr[2];
6277 /* A rule already exists with the new VSI being added */
6278 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6279 new_fltr->sw_act.fwd_id.hw_vsi_id)
6280 return ICE_ERR_ALREADY_EXISTS;
6282 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6283 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6284 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
/* repoint the existing switch rule from single-VSI forwarding to
 * the newly created VSI list
 */
6290 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6291 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6292 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6293 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6294 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6296 /* Update the previous switch rule of "forward to VSI" to
6299 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6303 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6304 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6305 m_entry->vsi_list_info =
6306 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6309 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6311 if (!m_entry->vsi_list_info)
6314 /* A rule already exists with the new VSI being added */
6315 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6318 /* Update the previously created VSI list set with
6319 * the new VSI ID passed in
6321 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6323 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6325 ice_aqc_opc_update_sw_rules,
6327 /* update VSI list mapping info with new VSI ID */
6329 ice_set_bit(vsi_handle,
6330 m_entry->vsi_list_info->vsi_map);
/* one more subscriber on this filter (elided success check above) */
6333 m_entry->vsi_count++;
/* NOTE(review): long function with many elided lines (status checks, some
 * case labels, braces); comments describe only the visible flow.
 */
6338 * ice_add_adv_rule - helper function to create an advanced switch rule
6339 * @hw: pointer to the hardware structure
6340 * @lkups: information on the words that needs to be looked up. All words
6341 * together makes one recipe
6342 * @lkups_cnt: num of entries in the lkups array
6343 * @rinfo: other information related to the rule that needs to be programmed
6344 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6345 * ignored is case of error.
6347 * This function can program only 1 rule at a time. The lkups is used to
6348 * describe the all the words that forms the "lookup" portion of the recipe.
6349 * These words can span multiple protocols. Callers to this function need to
6350 * pass in a list of protocol headers with lookup information along and mask
6351 * that determines which words are valid from the given protocol header.
6352 * rinfo describes other information related to this rule such as forwarding
6353 * IDs, priority of this rule, etc.
6356 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6357 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6358 struct ice_rule_query_data *added_entry)
6360 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6361 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6362 const struct ice_dummy_pkt_offsets *pkt_offsets;
6363 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6364 struct LIST_HEAD_TYPE *rule_head;
6365 struct ice_switch_info *sw;
6366 enum ice_status status;
6367 const u8 *pkt = NULL;
6372 /* Initialize profile to result index bitmap */
6373 if (!hw->switch_info->prof_res_bm_init) {
6374 hw->switch_info->prof_res_bm_init = 1;
6375 ice_init_prof_result_bm(hw);
6379 return ICE_ERR_PARAM;
6381 /* get # of words we need to match */
/* count set 16-bit words across all masks; zero or too many words is
 * an invalid request
 */
6383 for (i = 0; i < lkups_cnt; i++) {
6386 ptr = (u16 *)&lkups[i].m_u;
6387 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6391 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6392 return ICE_ERR_PARAM;
6394 /* make sure that we can locate a dummy packet */
6395 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6398 status = ICE_ERR_PARAM;
6399 goto err_ice_add_adv_rule;
/* only these four actions are supported for advanced rules */
6402 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6403 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6404 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6405 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6408 vsi_handle = rinfo->sw_act.vsi_handle;
6409 if (!ice_is_vsi_valid(hw, vsi_handle))
6410 return ICE_ERR_PARAM;
6412 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6413 rinfo->sw_act.fwd_id.hw_vsi_id =
6414 ice_get_hw_vsi_num(hw, vsi_handle);
6415 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6416 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* find or create the recipe that matches this lookup set */
6418 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6421 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6423 /* we have to add VSI to VSI_LIST and increment vsi_count.
6424 * Also Update VSI list so that we can change forwarding rule
6425 * if the rule already exists, we will check if it exists with
6426 * same vsi_id, if not then add it to the VSI list if it already
6427 * exists if not then create a VSI list and add the existing VSI
6428 * ID and the new VSI ID to the list
6429 * We will add that VSI to the list
6431 status = ice_adv_add_update_vsi_list(hw, m_entry,
6432 &m_entry->rule_info,
6435 added_entry->rid = rid;
6436 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6437 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* rule does not exist yet: build a fresh switch-rule AQ buffer */
6441 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6442 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6444 return ICE_ERR_NO_MEMORY;
6445 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* encode the requested action into the single-action word */
6446 switch (rinfo->sw_act.fltr_act) {
6447 case ICE_FWD_TO_VSI:
6448 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6449 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6450 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6453 act |= ICE_SINGLE_ACT_TO_Q;
6454 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6455 ICE_SINGLE_ACT_Q_INDEX_M;
6457 case ICE_FWD_TO_QGRP:
6458 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6459 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6460 act |= ICE_SINGLE_ACT_TO_Q;
6461 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6462 ICE_SINGLE_ACT_Q_INDEX_M;
6463 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6464 ICE_SINGLE_ACT_Q_REGION_M;
6466 case ICE_DROP_PACKET:
6467 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6468 ICE_SINGLE_ACT_VALID_BIT;
6471 status = ICE_ERR_CFG;
6472 goto err_ice_add_adv_rule;
6475 /* set the rule LOOKUP type based on caller specified 'RX'
6476 * instead of hardcoding it to be either LOOKUP_TX/RX
6478 * for 'RX' set the source to be the port number
6479 * for 'TX' set the source to be the source HW VSI number (determined
6483 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6484 s_rule->pdata.lkup_tx_rx.src =
6485 CPU_TO_LE16(hw->port_info->lport);
6487 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6488 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6491 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6492 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* splice the caller's match values into the dummy packet template */
6494 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6495 pkt_len, pkt_offsets);
6497 goto err_ice_add_adv_rule;
6499 if (rinfo->tun_type != ICE_NON_TUN &&
6500 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6501 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6502 s_rule->pdata.lkup_tx_rx.hdr,
6505 goto err_ice_add_adv_rule;
/* program the rule to hardware via the admin queue */
6508 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6509 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6512 goto err_ice_add_adv_rule;
/* bookkeeping entry mirroring what was programmed */
6513 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6514 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6516 status = ICE_ERR_NO_MEMORY;
6517 goto err_ice_add_adv_rule;
6520 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6521 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6522 ICE_NONDMA_TO_NONDMA);
6523 if (!adv_fltr->lkups) {
6524 status = ICE_ERR_NO_MEMORY;
6525 goto err_ice_add_adv_rule;
6528 adv_fltr->lkups_cnt = lkups_cnt;
6529 adv_fltr->rule_info = *rinfo;
6530 adv_fltr->rule_info.fltr_rule_id =
6531 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6532 sw = hw->switch_info;
6533 sw->recp_list[rid].adv_rule = true;
6534 rule_head = &sw->recp_list[rid].filt_rules;
6536 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6537 struct ice_fltr_info tmp_fltr;
6539 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6540 tmp_fltr.fltr_rule_id =
6541 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6542 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6543 tmp_fltr.fwd_id.hw_vsi_id =
6544 ice_get_hw_vsi_num(hw, vsi_handle);
6545 tmp_fltr.vsi_handle = vsi_handle;
6546 /* Update the previous switch rule of "forward to VSI" to
6549 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6551 goto err_ice_add_adv_rule;
6552 adv_fltr->vsi_count = 1;
6555 /* Add rule entry to book keeping list */
6556 LIST_ADD(&adv_fltr->list_entry, rule_head);
6558 added_entry->rid = rid;
6559 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6560 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6562 err_ice_add_adv_rule:
/* on failure, release the partially built bookkeeping entry */
6563 if (status && adv_fltr) {
6564 ice_free(hw, adv_fltr->lkups);
6565 ice_free(hw, adv_fltr);
/* AQ buffer is always released (success or failure) */
6568 ice_free(hw, s_rule);
6574 * ice_adv_rem_update_vsi_list
6575 * @hw: pointer to the hardware structure
6576 * @vsi_handle: VSI handle of the VSI to remove
6577 * @fm_list: filter management entry for which the VSI list management needs to
6580 static enum ice_status
6581 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6582 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6584 struct ice_vsi_list_map_info *vsi_list_info;
6585 enum ice_sw_lkup_type lkup_type;
6586 enum ice_status status;
/* only meaningful for rules forwarding to a non-empty VSI list */
6589 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6590 fm_list->vsi_count == 0)
6591 return ICE_ERR_PARAM;
6593 /* A rule with the VSI being removed does not exist */
6594 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6595 return ICE_ERR_DOES_NOT_EXIST;
6597 lkup_type = ICE_SW_LKUP_LAST;
6598 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* remove the VSI from the HW VSI list (remove=true) */
6599 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6600 ice_aqc_opc_update_sw_rules,
6605 fm_list->vsi_count--;
6606 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6607 vsi_list_info = fm_list->vsi_list_info;
6608 if (fm_list->vsi_count == 1) {
/* one subscriber left: collapse the VSI list back into a plain
 * forward-to-VSI rule for the remaining VSI
 */
6609 struct ice_fltr_info tmp_fltr;
6612 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6614 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6615 return ICE_ERR_OUT_OF_RANGE;
6617 /* Make sure VSI list is empty before removing it below */
6618 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6620 ice_aqc_opc_update_sw_rules,
6625 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6626 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6627 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6628 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6629 tmp_fltr.fwd_id.hw_vsi_id =
6630 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6631 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6632 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6634 /* Update the previous switch rule of "MAC forward to VSI" to
6635 * "MAC fwd to VSI list"
6637 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6639 ice_debug(hw, ICE_DBG_SW,
6640 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6641 tmp_fltr.fwd_id.hw_vsi_id, status);
6645 /* Remove the VSI list since it is no longer used */
6646 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6648 ice_debug(hw, ICE_DBG_SW,
6649 "Failed to remove VSI list %d, error %d\n",
6650 vsi_list_id, status);
/* drop the software map entry tracking the now-removed list */
6654 LIST_DEL(&vsi_list_info->list_entry);
6655 ice_free(hw, vsi_list_info);
6656 fm_list->vsi_list_info = NULL;
6663 * ice_rem_adv_rule - removes existing advanced switch rule
6664 * @hw: pointer to the hardware structure
6665 * @lkups: information on the words that needs to be looked up. All words
6666 * together makes one recipe
6667 * @lkups_cnt: num of entries in the lkups array
6668 * @rinfo: Its the pointer to the rule information for the rule
6670 * This function can be used to remove 1 rule at a time. The lkups is
6671 * used to describe all the words that forms the "lookup" portion of the
6672 * rule. These words can span multiple protocols. Callers to this function
6673 * need to pass in a list of protocol headers with lookup information along
6674 * and mask that determines which words are valid from the given protocol
6675 * header. rinfo describes other information related to this rule such as
6676 * forwarding IDs, priority of this rule, etc.
6679 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6680 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6682 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6683 struct ice_prot_lkup_ext lkup_exts;
6684 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6685 enum ice_status status = ICE_SUCCESS;
6686 bool remove_rule = false;
6687 u16 i, rid, vsi_handle;
/* rebuild the lookup-word set so we can locate the owning recipe */
6689 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6690 for (i = 0; i < lkups_cnt; i++) {
6693 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6696 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6701 /* Create any special protocol/offset pairs, such as looking at tunnel
6702 * bits by extracting metadata
6704 status = ice_add_special_words(rinfo, &lkup_exts);
6708 rid = ice_find_recp(hw, &lkup_exts);
6709 /* If did not find a recipe that match the existing criteria */
6710 if (rid == ICE_MAX_NUM_RECIPES)
6711 return ICE_ERR_PARAM;
6713 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6714 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6715 /* the rule is already removed */
/* decide, under the rule lock, whether the HW rule itself must go or
 * only this VSI's subscription on a shared VSI list
 */
6718 ice_acquire_lock(rule_lock);
6719 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6721 } else if (list_elem->vsi_count > 1) {
6722 list_elem->vsi_list_info->ref_cnt--;
6723 remove_rule = false;
6724 vsi_handle = rinfo->sw_act.vsi_handle;
6725 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6727 vsi_handle = rinfo->sw_act.vsi_handle;
6728 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6730 ice_release_lock(rule_lock);
6733 if (list_elem->vsi_count == 0)
6736 ice_release_lock(rule_lock);
/* remove_rule path: issue the AQ remove and drop the bookkeeping */
6738 struct ice_aqc_sw_rules_elem *s_rule;
6741 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6743 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6746 return ICE_ERR_NO_MEMORY;
6747 s_rule->pdata.lkup_tx_rx.act = 0;
6748 s_rule->pdata.lkup_tx_rx.index =
6749 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6750 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6751 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6753 ice_aqc_opc_remove_sw_rules, NULL);
6754 if (status == ICE_SUCCESS) {
6755 ice_acquire_lock(rule_lock);
6756 LIST_DEL(&list_elem->list_entry);
6757 ice_free(hw, list_elem->lkups);
6758 ice_free(hw, list_elem);
6759 ice_release_lock(rule_lock);
6761 ice_free(hw, s_rule);
6767 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6768 * @hw: pointer to the hardware structure
6769 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6771 * This function is used to remove 1 rule at a time. The removal is based on
6772 * the remove_entry parameter. This function will remove rule for a given
6773 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6776 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6777 struct ice_rule_query_data *remove_entry)
6779 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6780 struct LIST_HEAD_TYPE *list_head;
6781 struct ice_adv_rule_info rinfo;
6782 struct ice_switch_info *sw;
6784 sw = hw->switch_info;
 /* The recipe must exist before any of its rules can be removed */
6785 if (!sw->recp_list[remove_entry->rid].recp_created)
6786 return ICE_ERR_PARAM;
6787 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
 /* Walk the recipe's filter list looking for the matching rule ID */
6788 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6790 if (list_itr->rule_info.fltr_rule_id ==
6791 remove_entry->rule_id) {
 /* Take a copy so the caller's VSI handle can be patched in
  * without mutating the stored rule info.
  */
6792 rinfo = list_itr->rule_info;
6793 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6794 return ice_rem_adv_rule(hw, list_itr->lkups,
6795 list_itr->lkups_cnt, &rinfo);
 /* No rule with the requested ID was found in this recipe */
6798 return ICE_ERR_PARAM;
6802 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6804 * @hw: pointer to the hardware structure
6805 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6807 * This function is used to remove all the rules for a given VSI and as soon
6808 * as removing a rule fails, it will return immediately with the error code,
6809 * else it will return ICE_SUCCESS
6812 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6814 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6815 struct ice_vsi_list_map_info *map_info;
6816 struct LIST_HEAD_TYPE *list_head;
6817 struct ice_adv_rule_info rinfo;
6818 struct ice_switch_info *sw;
6819 enum ice_status status;
6820 u16 vsi_list_id = 0;
6823 sw = hw->switch_info;
 /* Scan every recipe; only advanced-rule recipes are of interest here */
6824 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6825 if (!sw->recp_list[rid].recp_created)
6827 if (!sw->recp_list[rid].adv_rule)
6829 list_head = &sw->recp_list[rid].filt_rules;
6831 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6832 ice_adv_fltr_mgmt_list_entry, list_entry) {
 /* Check whether this VSI participates in the rule's VSI list
  * before attempting removal.
  */
6833 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
 /* Remove the rule on behalf of this VSI; copy rule info so the
  * stored entry is not modified.
  */
6838 rinfo = list_itr->rule_info;
6839 rinfo.sw_act.vsi_handle = vsi_handle;
6840 status = ice_rem_adv_rule(hw, list_itr->lkups,
6841 list_itr->lkups_cnt, &rinfo);
6851 * ice_replay_fltr - Replay all the filters stored by a specific list head
6852 * @hw: pointer to the hardware structure
6853 * @list_head: list for which filters needs to be replayed
6854 * @recp_id: Recipe ID for which rules need to be replayed
6856 static enum ice_status
6857 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6859 struct ice_fltr_mgmt_list_entry *itr;
6860 enum ice_status status = ICE_SUCCESS;
6861 struct ice_sw_recipe *recp_list;
6862 u8 lport = hw->port_info->lport;
6863 struct LIST_HEAD_TYPE l_head;
 /* Nothing to replay for an empty list */
6865 if (LIST_EMPTY(list_head))
6868 recp_list = &hw->switch_info->recp_list[recp_id];
6869 /* Move entries from the given list_head to a temporary l_head so that
6870 * they can be replayed. Otherwise when trying to re-add the same
6871 * filter, the function will return already exists
6873 LIST_REPLACE_INIT(list_head, &l_head);
6875 /* Mark the given list_head empty by reinitializing it so filters
6876 * could be added again by *handler
6878 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6880 struct ice_fltr_list_entry f_entry;
6882 f_entry.fltr_info = itr->fltr_info;
 /* Single-VSI, non-VLAN rules can be re-added in one shot */
6883 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6884 status = ice_add_rule_internal(hw, recp_list, lport,
6886 if (status != ICE_SUCCESS)
6891 /* Add a filter per VSI separately */
6896 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6898 if (!ice_is_vsi_valid(hw, vsi_handle))
 /* Clear the bit so the loop over the VSI map terminates, then
  * re-add the filter as a plain forward-to-VSI rule for this VSI.
  */
6901 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6902 f_entry.fltr_info.vsi_handle = vsi_handle;
6903 f_entry.fltr_info.fwd_id.hw_vsi_id =
6904 ice_get_hw_vsi_num(hw, vsi_handle);
6905 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 /* VLAN filters take a dedicated add path */
6906 if (recp_id == ICE_SW_LKUP_VLAN)
6907 status = ice_add_vlan_internal(hw, recp_list,
6910 status = ice_add_rule_internal(hw, recp_list,
6913 if (status != ICE_SUCCESS)
6918 /* Clear the filter management list */
6919 ice_rem_sw_rule_info(hw, &l_head)
6924 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6925 * @hw: pointer to the hardware structure
6927 * NOTE: This function does not clean up partially added filters on error.
6928 * It is up to caller of the function to issue a reset or fail early.
6930 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6932 struct ice_switch_info *sw = hw->switch_info;
6933 enum ice_status status = ICE_SUCCESS;
 /* Replay each recipe's stored rules in order; stop at first failure */
6936 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6937 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6939 status = ice_replay_fltr(hw, i, head);
6940 if (status != ICE_SUCCESS)
6947 * ice_replay_vsi_fltr - Replay filters for requested VSI
6948 * @hw: pointer to the hardware structure
6949 * @vsi_handle: driver VSI handle
6950 * @recp_id: Recipe ID for which rules need to be replayed
6951 * @list_head: list for which filters need to be replayed
6953 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6954 * It is required to pass valid VSI handle.
6956 static enum ice_status
6957 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6958 struct LIST_HEAD_TYPE *list_head)
6960 struct ice_fltr_mgmt_list_entry *itr;
6961 enum ice_status status = ICE_SUCCESS;
6962 struct ice_sw_recipe *recp_list;
 /* Nothing to replay for an empty list */
6965 if (LIST_EMPTY(list_head))
6967 recp_list = &hw->switch_info->recp_list[recp_id];
 /* Resolve the hardware VSI number once; reused for every entry */
6968 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6970 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6972 struct ice_fltr_list_entry f_entry;
6974 f_entry.fltr_info = itr->fltr_info;
 /* Single-VSI, non-VLAN rules owned by this VSI replay directly */
6975 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6976 itr->fltr_info.vsi_handle == vsi_handle) {
6977 /* update the src in case it is VSI num */
6978 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6979 f_entry.fltr_info.src = hw_vsi_id;
6980 status = ice_add_rule_internal(hw, recp_list,
6981 hw->port_info->lport,
6983 if (status != ICE_SUCCESS)
 /* VSI-list rule: only replay it if this VSI is a member */
6987 if (!itr->vsi_list_info ||
6988 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6990 /* Clearing it so that the logic can add it back */
6991 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6992 f_entry.fltr_info.vsi_handle = vsi_handle;
6993 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6994 /* update the src in case it is VSI num */
6995 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6996 f_entry.fltr_info.src = hw_vsi_id;
 /* VLAN filters take a dedicated add path */
6997 if (recp_id == ICE_SW_LKUP_VLAN)
6998 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7000 status = ice_add_rule_internal(hw, recp_list,
7001 hw->port_info->lport,
7003 if (status != ICE_SUCCESS)
7011 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7012 * @hw: pointer to the hardware structure
7013 * @vsi_handle: driver VSI handle
7014 * @list_head: list for which filters need to be replayed
7016 * Replay the advanced rule for the given VSI.
7018 static enum ice_status
7019 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7020 struct LIST_HEAD_TYPE *list_head)
7022 struct ice_rule_query_data added_entry = { 0 };
7023 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7024 enum ice_status status = ICE_SUCCESS;
 /* Nothing to replay for an empty list */
7026 if (LIST_EMPTY(list_head))
7028 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7030 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7031 u16 lk_cnt = adv_fltr->lkups_cnt;
 /* Only replay rules that were installed for this VSI */
7033 if (vsi_handle != rinfo->sw_act.vsi_handle)
 /* Re-add the advanced rule with its stored lookups and rule info */
7035 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7044 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7045 * @hw: pointer to the hardware structure
7046 * @vsi_handle: driver VSI handle
7048 * Replays filters for requested VSI via vsi_handle.
7050 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7052 struct ice_switch_info *sw = hw->switch_info;
7053 enum ice_status status;
7056 /* Update the recipes that were created */
7057 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7058 struct LIST_HEAD_TYPE *head;
7060 head = &sw->recp_list[i].filt_replay_rules;
 /* Legacy and advanced rules are replayed through different paths */
7061 if (!sw->recp_list[i].adv_rule)
7062 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7064 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
 /* Abort on the first failure; caller is expected to handle cleanup */
7065 if (status != ICE_SUCCESS)
7073 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7074 * @hw: pointer to the HW struct
7076 * Deletes the filter replay rules.
7078 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7080 struct ice_switch_info *sw = hw->switch_info;
7086 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7087 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7088 struct LIST_HEAD_TYPE *l_head;
7090 l_head = &sw->recp_list[i].filt_replay_rules;
7091 if (!sw->recp_list[i].adv_rule)
7092 ice_rem_sw_rule_info(hw, l_head);
7094 ice_rem_adv_rule_info(hw, l_head);