1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header defined below, plus the
 * largest valid 802.1Q VLAN ID (12-bit field, so 0xFFF).
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * A note on the hardcoded values used below:
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* 16-byte dummy L2 header (DA, SA, EtherType-or-VLAN) as described in the
 * comment above; byte 0 = 0x2 marks the DA as locally administered.
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Buffer sizes for the variable-length switch-rule AQ elements.  Each is
 * sizeof the AQ element minus its flexible 'pdata' member, plus the
 * specific rule payload.  NOTE(review): the trailing "- 1" appears to drop
 * a one-byte placeholder array inside the payload struct — confirm against
 * the struct definitions in the adminq command header.
 */
/* Lookup Rx/Tx rule carrying the dummy Ethernet header as packet data */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Lookup Rx/Tx rule with no packet data appended */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule sized for (n) actions */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for (n) VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* (protocol, byte-offset) pair describing where each header begins inside
 * a dummy packet; a table is terminated by an ICE_PROTOCOL_LAST entry.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP dummy packet */
60 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
63 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
/* Dummy NVGRE-encapsulated TCP packet; outer IP proto 0x2F = GRE,
 * inner IP proto 0x06 = TCP.
 */
71 static const u8 dummy_gre_tcp_packet[] = {
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
76 0x08, 0x00, /* ICE_ETYPE_OL 12 */
78 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x2F, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
84 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
85 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
88 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00,
92 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
93 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x06, 0x00, 0x00,
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x50, 0x02, 0x20, 0x00,
102 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP dummy packet */
105 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
107 { ICE_ETYPE_OL, 12 },
108 { ICE_IPV4_OFOS, 14 },
112 { ICE_UDP_ILOS, 76 },
113 { ICE_PROTOCOL_LAST, 0 },
/* Dummy NVGRE-encapsulated UDP packet; outer IP proto 0x2F = GRE,
 * inner IP proto 0x11 = UDP.
 */
116 static const u8 dummy_gre_udp_packet[] = {
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
121 0x08, 0x00, /* ICE_ETYPE_OL 12 */
123 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x2F, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
129 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
130 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
137 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x11, 0x00, 0x00,
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
144 0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/VXLAN-GPE) + inner TCP */
147 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
154 { ICE_VXLAN_GPE, 42 },
158 { ICE_PROTOCOL_LAST, 0 },
/* Dummy UDP-tunneled TCP packet; outer UDP dst port 0x12b5 (4789, VXLAN),
 * inner IP proto 0x06 = TCP.
 */
161 static const u8 dummy_udp_tun_tcp_packet[] = {
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x08, 0x00, /* ICE_ETYPE_OL 12 */
168 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x11, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
175 0x00, 0x46, 0x00, 0x00,
177 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
181 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
185 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
186 0x00, 0x01, 0x00, 0x00,
187 0x40, 0x06, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x50, 0x02, 0x20, 0x00,
195 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/VXLAN-GPE) + inner UDP */
198 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
200 { ICE_ETYPE_OL, 12 },
201 { ICE_IPV4_OFOS, 14 },
205 { ICE_VXLAN_GPE, 42 },
208 { ICE_UDP_ILOS, 84 },
209 { ICE_PROTOCOL_LAST, 0 },
/* Dummy UDP-tunneled UDP packet; outer UDP dst port 0x12b5 (4789, VXLAN),
 * inner IP proto 0x11 = UDP.
 */
212 static const u8 dummy_udp_tun_udp_packet[] = {
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x08, 0x00, /* ICE_ETYPE_OL 12 */
219 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
226 0x00, 0x3a, 0x00, 0x00,
228 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
236 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
237 0x00, 0x01, 0x00, 0x00,
238 0x00, 0x11, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
243 0x00, 0x08, 0x00, 0x00,
246 /* offset info for MAC + IPv4 + UDP dummy packet */
247 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
249 { ICE_ETYPE_OL, 12 },
250 { ICE_IPV4_OFOS, 14 },
251 { ICE_UDP_ILOS, 34 },
252 { ICE_PROTOCOL_LAST, 0 },
255 /* Dummy packet for MAC + IPv4 + UDP */
/* IP proto 0x11 = UDP; UDP length field = 8 (header only, no payload) */
256 static const u8 dummy_udp_packet[] = {
257 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00,
261 0x08, 0x00, /* ICE_ETYPE_OL 12 */
263 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
264 0x00, 0x01, 0x00, 0x00,
265 0x00, 0x11, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
270 0x00, 0x08, 0x00, 0x00,
272 0x00, 0x00, /* 2 bytes for 4 byte alignment */
275 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
276 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
278 { ICE_ETYPE_OL, 12 },
279 { ICE_VLAN_OFOS, 14 },
280 { ICE_IPV4_OFOS, 18 },
281 { ICE_UDP_ILOS, 38 },
282 { ICE_PROTOCOL_LAST, 0 },
285 /* C-tag (802.1Q), IPv4:UDP dummy packet */
286 static const u8 dummy_vlan_udp_packet[] = {
287 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
288 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00,
291 0x81, 0x00, /* ICE_ETYPE_OL 12 */
293 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
295 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
296 0x00, 0x01, 0x00, 0x00,
297 0x00, 0x11, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
302 0x00, 0x08, 0x00, 0x00,
304 0x00, 0x00, /* 2 bytes for 4 byte alignment */
307 /* offset info for MAC + IPv4 + TCP dummy packet */
308 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
310 { ICE_ETYPE_OL, 12 },
311 { ICE_IPV4_OFOS, 14 },
313 { ICE_PROTOCOL_LAST, 0 },
316 /* Dummy packet for MAC + IPv4 + TCP */
/* IP proto 0x06 = TCP; 0x50 in the TCP header = data offset of 5 words */
317 static const u8 dummy_tcp_packet[] = {
318 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
319 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00,
322 0x08, 0x00, /* ICE_ETYPE_OL 12 */
324 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
325 0x00, 0x01, 0x00, 0x00,
326 0x00, 0x06, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x50, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, /* 2 bytes for 4 byte alignment */
339 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
340 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_VLAN_OFOS, 14 },
344 { ICE_IPV4_OFOS, 18 },
346 { ICE_PROTOCOL_LAST, 0 },
349 /* C-tag (802.1Q), IPv4:TCP dummy packet */
350 static const u8 dummy_vlan_tcp_packet[] = {
351 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
352 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x81, 0x00, /* ICE_ETYPE_OL 12 */
357 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
359 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
360 0x00, 0x01, 0x00, 0x00,
361 0x00, 0x06, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
368 0x50, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
374 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
376 { ICE_ETYPE_OL, 12 },
377 { ICE_IPV6_OFOS, 14 },
379 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet for MAC + IPv6 + TCP (ethertype 0x86DD = IPv6) */
382 static const u8 dummy_tcp_ipv6_packet[] = {
383 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
389 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
390 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
391 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x50, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, /* 2 bytes for 4 byte alignment */
409 /* C-tag (802.1Q): IPv6 + TCP */
410 static const struct ice_dummy_pkt_offsets
411 dummy_vlan_tcp_ipv6_packet_offsets[] = {
413 { ICE_ETYPE_OL, 12 },
414 { ICE_VLAN_OFOS, 14 },
415 { ICE_IPV6_OFOS, 18 },
417 { ICE_PROTOCOL_LAST, 0 },
420 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
421 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x81, 0x00, /* ICE_ETYPE_OL 12 */
428 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
430 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
431 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x50, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
451 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
453 { ICE_ETYPE_OL, 12 },
454 { ICE_IPV6_OFOS, 14 },
455 { ICE_UDP_ILOS, 54 },
456 { ICE_PROTOCOL_LAST, 0 },
459 /* IPv6 + UDP dummy packet */
460 static const u8 dummy_udp_ipv6_packet[] = {
461 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
462 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00,
465 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
467 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
468 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
469 0x00, 0x00, 0x00, 0x00,
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
479 0x00, 0x08, 0x00, 0x00,
481 0x00, 0x00, /* 2 bytes for 4 byte alignment */
484 /* C-tag (802.1Q): IPv6 + UDP */
485 static const struct ice_dummy_pkt_offsets
486 dummy_vlan_udp_ipv6_packet_offsets[] = {
488 { ICE_ETYPE_OL, 12 },
489 { ICE_VLAN_OFOS, 14 },
490 { ICE_IPV6_OFOS, 18 },
491 { ICE_UDP_ILOS, 58 },
492 { ICE_PROTOCOL_LAST, 0 },
495 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
496 static const u8 dummy_vlan_udp_ipv6_packet[] = {
497 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x81, 0x00, /* ICE_ETYPE_OL 12 */
503 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
505 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
506 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
517 0x00, 0x08, 0x00, 0x00,
519 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
522 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
524 { ICE_IPV4_OFOS, 14 },
527 { ICE_PROTOCOL_LAST, 0 },
/* Dummy GTP-U packet; UDP dst port 0x0868 (2152, GTP-U) — next-ext-header
 * byte 0x85 selects the PDU session container extension below.
 */
530 static const u8 dummy_udp_gtp_packet[] = {
531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
532 0x00, 0x00, 0x00, 0x00,
533 0x00, 0x00, 0x00, 0x00,
536 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
537 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x11, 0x00, 0x00,
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
543 0x00, 0x1c, 0x00, 0x00,
545 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
546 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x85,
549 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
550 0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE dummy packet */
553 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
555 { ICE_ETYPE_OL, 12 },
556 { ICE_VLAN_OFOS, 14},
558 { ICE_PROTOCOL_LAST, 0 },
/* Dummy PPPoE session packet; VLAN-embedded ethertype 0x8864 = PPPoE
 * session stage, PPP protocol 0x0021 = IPv4.
 */
561 static const u8 dummy_pppoe_packet[] = {
562 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x00, 0x00, 0x00,
566 0x81, 0x00, /* ICE_ETYPE_OL 12 */
568 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
570 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
573 0x00, 0x21, /* PPP Link Layer 24 */
575 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Module-global caches of the FW recipe<->profile association, refreshed
 * by ice_get_recp_to_prof_map() (forward-declared below).
 */
584 /* this is a recipe to profile association bitmap */
585 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
586 ICE_MAX_NUM_PROFILES);
588 /* this is a profile to recipe association bitmap */
589 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
590 ICE_MAX_NUM_RECIPES);
592 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
595 * ice_collect_result_idx - copy result index values
596 * @buf: buffer that contains the result index
597 * @recp: the recipe struct to copy data into
/* If the recipe element has a valid result index (RESULT_EN set), record
 * the index value (low bits) in the recipe's res_idxs bitmap.
 */
599 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
600 struct ice_sw_recipe *recp)
602 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
603 ice_set_bit(buf->content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs)
608 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
609 * @hw: pointer to hardware structure
610 * @recps: struct that we need to populate
611 * @rid: recipe ID that we are populating
612 * @refresh_required: true if we should get recipe to profile mapping from FW
614 * This function is used to populate all the necessary entries into our
615 * bookkeeping so that we have a current list of all the recipes that are
616 * programmed in the firmware.
618 static enum ice_status
619 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
620 bool *refresh_required)
622 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
623 struct ice_aqc_recipe_data_elem *tmp;
624 u16 num_recps = ICE_MAX_NUM_RECIPES;
625 struct ice_prot_lkup_ext *lkup_exts;
626 enum ice_status status;
630 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
632 /* we need a buffer big enough to accommodate all the recipes */
633 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
634 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
636 return ICE_ERR_NO_MEMORY;
/* Seed the query with the recipe ID; FW returns it plus any chained
 * sub-recipes in tmp[], with the actual count in num_recps.
 */
638 tmp[0].recipe_indx = rid;
639 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
640 /* non-zero status meaning recipe doesn't exist */
644 /* Get recipe to profile map so that we can get the fv from lkups that
645 * we read for a recipe from FW. Since we want to minimize the number of
646 * times we make this FW call, just make one call and cache the copy
647 * until a new recipe is added. This operation is only required the
648 * first time to get the changes from FW. Then to search existing
649 * entries we don't need to update the cache again until another recipe
652 if (*refresh_required) {
653 ice_get_recp_to_prof_map(hw);
654 *refresh_required = false;
657 /* Start populating all the entries for recps[rid] based on lkups from
658 * firmware. Note that we are only creating the root recipe in our
661 lkup_exts = &recps[rid].lkup_exts;
663 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
664 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
665 struct ice_recp_grp_entry *rg_entry;
666 u8 i, prof, idx, prot = 0;
/* One rg_list entry per FW recipe element; freed with the recipe */
670 rg_entry = (struct ice_recp_grp_entry *)
671 ice_malloc(hw, sizeof(*rg_entry));
673 status = ICE_ERR_NO_MEMORY;
677 idx = root_bufs.recipe_indx;
678 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
680 /* Mark all result indices in this chain */
681 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
682 ice_set_bit(root_bufs.content.result_indx &
683 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
685 /* get the first profile that is associated with rid */
686 prof = ice_find_first_bit(recipe_to_profile[idx],
687 ICE_MAX_NUM_PROFILES);
/* lkup_indx[0]/mask[0] are skipped; only words 1..N carry lookups */
688 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
689 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
691 rg_entry->fv_idx[i] = lkup_indx;
692 rg_entry->fv_mask[i] =
693 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
695 /* If the recipe is a chained recipe then all its
696 * child recipe's result will have a result index.
697 * To fill fv_words we should not use those result
698 * index, we only need the protocol ids and offsets.
699 * We will skip all the fv_idx which stores result
700 * index in them. We also need to skip any fv_idx which
701 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
702 * valid offset value.
704 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
705 rg_entry->fv_idx[i]) ||
706 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
707 rg_entry->fv_idx[i] == 0)
/* Translate the FV index into (protocol id, offset) for lkup_exts */
710 ice_find_prot_off(hw, ICE_BLK_SW, prof,
711 rg_entry->fv_idx[i], &prot, &off);
712 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
713 lkup_exts->fv_words[fv_word_idx].off = off;
716 /* populate rg_list with the data from the child entry of this
719 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
721 /* Propagate some data to the recipe database */
722 recps[idx].is_root = !!is_root;
723 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
724 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
725 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
726 recps[idx].chain_idx = root_bufs.content.result_indx &
727 ~ICE_AQ_RECIPE_RESULT_EN;
728 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
730 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
736 /* Only do the following for root recipes entries */
737 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
738 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
739 recps[idx].root_rid = root_bufs.content.rid &
740 ~ICE_AQ_RECIPE_ID_IS_ROOT;
741 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
744 /* Complete initialization of the root recipe entry */
745 lkup_exts->n_val_words = fv_word_idx;
/* A "big" recipe is one that spans more than one FW recipe element */
746 recps[rid].big_recp = (num_recps > 1);
747 recps[rid].n_grp_count = (u8)num_recps;
748 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
749 ice_memdup(hw, tmp, recps[rid].n_grp_count *
750 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
751 if (!recps[rid].root_buf)
754 /* Copy result indexes */
755 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
756 recps[rid].recp_created = true;
764 * ice_get_recp_to_prof_map - updates recipe to profile mapping
765 * @hw: pointer to hardware structure
767 * This function is used to populate recipe_to_profile matrix where index to
768 * this array is the recipe ID and the element is the mapping of which profiles
769 * is this recipe mapped to.
772 ice_get_recp_to_prof_map(struct ice_hw *hw)
774 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* For every profile, ask FW which recipes it maps to and mirror the
 * answer into both global direction maps (profile->recipe and
 * recipe->profile).  A failed AQ query leaves that profile's map zeroed.
 */
777 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
780 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
781 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
782 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
784 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
785 ICE_MAX_NUM_RECIPES);
786 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
787 if (ice_is_bit_set(r_bitmap, j))
788 ice_set_bit(i, recipe_to_profile[j]);
793 * ice_init_def_sw_recp - initialize the recipe book keeping tables
794 * @hw: pointer to the HW struct
795 * @recp_list: pointer to sw recipe list
797 * Allocate memory for the entire recipe table and initialize the structures/
798 * entries corresponding to basic recipes.
801 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
803 struct ice_sw_recipe *recps;
/* One zeroed entry per possible recipe; returns ICE_ERR_NO_MEMORY on
 * allocation failure.  Ownership of the table passes to *recp_list.
 */
806 recps = (struct ice_sw_recipe *)
807 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
809 return ICE_ERR_NO_MEMORY;
/* Each recipe starts as its own root with empty rule/replay/group lists
 * and a fresh filter-rule lock.
 */
811 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
812 recps[i].root_rid = i;
813 INIT_LIST_HEAD(&recps[i].filt_rules);
814 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
815 INIT_LIST_HEAD(&recps[i].rg_list);
816 ice_init_lock(&recps[i].filt_rule_lock);
825 * ice_aq_get_sw_cfg - get switch configuration
826 * @hw: pointer to the hardware structure
827 * @buf: pointer to the result buffer
828 * @buf_size: length of the buffer available for response
829 * @req_desc: pointer to requested descriptor
830 * @num_elems: pointer to number of elements
831 * @cd: pointer to command details structure or NULL
833 * Get switch configuration (0x0200) to be placed in 'buff'.
834 * This admin command returns information such as initial VSI/port number
835 * and switch ID it belongs to.
837 * NOTE: *req_desc is both an input/output parameter.
838 * The caller of this function first calls this function with *request_desc set
839 * to 0. If the response from f/w has *req_desc set to 0, all the switch
840 * configuration information has been returned; if non-zero (meaning not all
841 * the information was returned), the caller should call this function again
842 * with *req_desc set to the previous value returned by f/w to get the
843 * next block of switch configuration information.
845 * *num_elems is output only parameter. This reflects the number of elements
846 * in response buffer. The caller of this function to use *num_elems while
847 * parsing the response buffer.
849 static enum ice_status
850 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
851 u16 buf_size, u16 *req_desc, u16 *num_elems,
852 struct ice_sq_cd *cd)
854 struct ice_aqc_get_sw_cfg *cmd;
855 enum ice_status status;
856 struct ice_aq_desc desc;
/* AQ 0x0200: *req_desc is in/out — 0 on first call, then the FW-returned
 * continuation cookie for subsequent calls (see header comment above).
 */
858 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
859 cmd = &desc.params.get_sw_conf;
860 cmd->element = CPU_TO_LE16(*req_desc);
862 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes the next descriptor cookie and element count back in cmd */
864 *req_desc = LE16_TO_CPU(cmd->element);
865 *num_elems = LE16_TO_CPU(cmd->num_elems);
872 * ice_alloc_sw - allocate resources specific to switch
873 * @hw: pointer to the HW struct
874 * @ena_stats: true to turn on VEB stats
875 * @shared_res: true for shared resource, false for dedicated resource
876 * @sw_id: switch ID returned
877 * @counter_id: VEB counter ID returned
879 * allocates switch resources (SWID and VEB counter) (0x0208)
882 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
885 struct ice_aqc_alloc_free_res_elem *sw_buf;
886 struct ice_aqc_res_elem *sw_ele;
887 enum ice_status status;
890 buf_len = sizeof(*sw_buf);
891 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
892 ice_malloc(hw, buf_len);
894 return ICE_ERR_NO_MEMORY;
896 /* Prepare buffer for switch ID.
897 * The number of resource entries in buffer is passed as 1 since only a
898 * single switch/VEB instance is allocated, and hence a single sw_id
901 sw_buf->num_elems = CPU_TO_LE16(1);
903 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
904 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
905 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
907 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
908 ice_aqc_opc_alloc_res, NULL);
911 goto ice_alloc_sw_exit;
/* Return the FW-assigned switch ID to the caller */
913 sw_ele = &sw_buf->elem[0];
914 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
/* When VEB stats are enabled, additionally allocate a dedicated VEB
 * counter in a second AQ transaction using a scratch buffer that is
 * freed on every path below.
 */
917 /* Prepare buffer for VEB Counter */
918 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
919 struct ice_aqc_alloc_free_res_elem *counter_buf;
920 struct ice_aqc_res_elem *counter_ele;
922 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
923 ice_malloc(hw, buf_len);
925 status = ICE_ERR_NO_MEMORY;
926 goto ice_alloc_sw_exit;
929 /* The number of resource entries in buffer is passed as 1 since
930 * only a single switch/VEB instance is allocated, and hence a
931 * single VEB counter is requested.
933 counter_buf->num_elems = CPU_TO_LE16(1);
934 counter_buf->res_type =
935 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
936 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
937 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
941 ice_free(hw, counter_buf);
942 goto ice_alloc_sw_exit;
944 counter_ele = &counter_buf->elem[0];
945 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
946 ice_free(hw, counter_buf);
950 ice_free(hw, sw_buf);
955 * ice_free_sw - free resources specific to switch
956 * @hw: pointer to the HW struct
957 * @sw_id: switch ID returned
958 * @counter_id: VEB counter ID returned
960 * free switch resources (SWID and VEB counter) (0x0209)
962 * NOTE: This function frees multiple resources. It continues
963 * releasing other resources even after it encounters error.
964 * The error code returned is the last error it encountered.
966 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
968 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
969 enum ice_status status, ret_status;
972 buf_len = sizeof(*sw_buf);
973 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
974 ice_malloc(hw, buf_len);
976 return ICE_ERR_NO_MEMORY;
978 /* Prepare buffer to free for switch ID res.
979 * The number of resource entries in buffer is passed as 1 since only a
980 * single switch/VEB instance is freed, and hence a single sw_id
983 sw_buf->num_elems = CPU_TO_LE16(1);
984 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
985 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
/* Best-effort semantics: keep going after a failed SWID free so the
 * VEB counter is still released; the last failure is what we return.
 */
987 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
988 ice_aqc_opc_free_res, NULL);
991 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
993 /* Prepare buffer to free for VEB Counter resource */
994 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
995 ice_malloc(hw, buf_len);
997 ice_free(hw, sw_buf);
998 return ICE_ERR_NO_MEMORY;
1001 /* The number of resource entries in buffer is passed as 1 since only a
1002 * single switch/VEB instance is freed, and hence a single VEB counter
1005 counter_buf->num_elems = CPU_TO_LE16(1);
1006 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1007 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1009 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1010 ice_aqc_opc_free_res, NULL);
1012 ice_debug(hw, ICE_DBG_SW,
1013 "VEB counter resource could not be freed\n");
1014 ret_status = status;
1017 ice_free(hw, counter_buf);
1018 ice_free(hw, sw_buf);
1024 * @hw: pointer to the HW struct
1025 * @vsi_ctx: pointer to a VSI context struct
1026 * @cd: pointer to command details structure or NULL
1028 * Add a VSI context to the hardware (0x0210)
1031 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1032 struct ice_sq_cd *cd)
1034 struct ice_aqc_add_update_free_vsi_resp *res;
1035 struct ice_aqc_add_get_update_free_vsi *cmd;
1036 struct ice_aq_desc desc;
1037 enum ice_status status;
1039 cmd = &desc.params.vsi_cmd;
1040 res = &desc.params.add_update_free_vsi_res;
1042 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* If the caller pre-picked a VSI number (not allocating from the FW
 * pool), pass it to FW with the IS_VALID flag.
 */
1044 if (!vsi_ctx->alloc_from_pool)
1045 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1046 ICE_AQ_VSI_IS_VALID);
1048 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* The VSI info struct travels as an indirect (RD) buffer */
1050 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1052 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1053 sizeof(vsi_ctx->info), cd);
/* On success FW reports the assigned VSI number and pool occupancy */
1056 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1057 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1058 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1066 * @hw: pointer to the HW struct
1067 * @vsi_ctx: pointer to a VSI context struct
1068 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1069 * @cd: pointer to command details structure or NULL
1071 * Free VSI context info from hardware (0x0213)
1074 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1075 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1077 struct ice_aqc_add_update_free_vsi_resp *resp;
1078 struct ice_aqc_add_get_update_free_vsi *cmd;
1079 struct ice_aq_desc desc;
1080 enum ice_status status;
1082 cmd = &desc.params.vsi_cmd;
1083 resp = &desc.params.add_update_free_vsi_res;
1085 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1087 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* KEEP_ALLOC retains the VSI allocation with this PF after the free */
1089 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1091 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Refresh the caller's view of FW VSI pool occupancy */
1093 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1094 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1102 * @hw: pointer to the HW struct
1103 * @vsi_ctx: pointer to a VSI context struct
1104 * @cd: pointer to command details structure or NULL
1106 * Update VSI context in the hardware (0x0211)
1109 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1110 struct ice_sq_cd *cd)
1112 struct ice_aqc_add_update_free_vsi_resp *resp;
1113 struct ice_aqc_add_get_update_free_vsi *cmd;
1114 struct ice_aq_desc desc;
1115 enum ice_status status;
1117 cmd = &desc.params.vsi_cmd;
1118 resp = &desc.params.add_update_free_vsi_res;
1120 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1122 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* Updated VSI info travels as an indirect (RD) buffer */
1124 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1126 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1127 sizeof(vsi_ctx->info), cd);
/* Refresh the caller's view of FW VSI pool occupancy */
1130 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1131 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1138 * ice_is_vsi_valid - check whether the VSI is valid or not
1139 * @hw: pointer to the HW struct
1140 * @vsi_handle: VSI handle
1142 * check whether the VSI is valid or not
/* True iff the handle is in range and a context has been saved for it */
1144 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1146 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1150 * ice_get_hw_vsi_num - return the HW VSI number
1151 * @hw: pointer to the HW struct
1152 * @vsi_handle: VSI handle
1154 * return the HW VSI number
1155 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1157 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1159 return hw->vsi_ctx[vsi_handle]->vsi_num;
1163 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1164 * @hw: pointer to the HW struct
1165 * @vsi_handle: VSI handle
1167 * return the VSI context entry for a given VSI handle
1169 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1171 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1175 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1176 * @hw: pointer to the HW struct
1177 * @vsi_handle: VSI handle
1178 * @vsi: VSI context pointer
1180 * save the VSI context entry for a given VSI handle
1183 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1185 hw->vsi_ctx[vsi_handle] = vsi;
1189 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1190 * @hw: pointer to the HW struct
1191 * @vsi_handle: VSI handle
1193 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1195 struct ice_vsi_ctx *vsi;
1198 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1201 ice_for_each_traffic_class(i) {
1202 if (vsi->lan_q_ctx[i]) {
1203 ice_free(hw, vsi->lan_q_ctx[i]);
1204 vsi->lan_q_ctx[i] = NULL;
1210 * ice_clear_vsi_ctx - clear the VSI context entry
1211 * @hw: pointer to the HW struct
1212 * @vsi_handle: VSI handle
1214 * clear the VSI context entry
1216 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1218 struct ice_vsi_ctx *vsi;
1220 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1222 ice_clear_vsi_q_ctx(hw, vsi_handle);
1224 hw->vsi_ctx[vsi_handle] = NULL;
1229 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1230 * @hw: pointer to the HW struct
1232 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1236 for (i = 0; i < ICE_MAX_VSI; i++)
1237 ice_clear_vsi_ctx(hw, i);
1241 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1242 * @hw: pointer to the HW struct
1243 * @vsi_handle: unique VSI handle provided by drivers
1244 * @vsi_ctx: pointer to a VSI context struct
1245 * @cd: pointer to command details structure or NULL
1247 * Add a VSI context to the hardware also add it into the VSI handle list.
1248 * If this function gets called after reset for existing VSIs then update
1249 * with the new HW VSI number in the corresponding VSI handle list entry.
1252 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1253 struct ice_sq_cd *cd)
1255 struct ice_vsi_ctx *tmp_vsi_ctx;
1256 enum ice_status status;
1258 if (vsi_handle >= ICE_MAX_VSI)
1259 return ICE_ERR_PARAM;
1260 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1263 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1265 /* Create a new VSI context */
1266 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1267 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1269 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1270 return ICE_ERR_NO_MEMORY;
1272 *tmp_vsi_ctx = *vsi_ctx;
1274 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1276 /* update with new HW VSI num */
1277 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1284 * ice_free_vsi- free VSI context from hardware and VSI handle list
1285 * @hw: pointer to the HW struct
1286 * @vsi_handle: unique VSI handle
1287 * @vsi_ctx: pointer to a VSI context struct
1288 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1289 * @cd: pointer to command details structure or NULL
1291 * Free VSI context info from hardware as well as from VSI handle list
1294 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1295 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1297 enum ice_status status;
1299 if (!ice_is_vsi_valid(hw, vsi_handle))
1300 return ICE_ERR_PARAM;
1301 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1302 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1304 ice_clear_vsi_ctx(hw, vsi_handle);
1310 * @hw: pointer to the HW struct
1311 * @vsi_handle: unique VSI handle
1312 * @vsi_ctx: pointer to a VSI context struct
1313 * @cd: pointer to command details structure or NULL
1315 * Update VSI context in the hardware
1318 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1319 struct ice_sq_cd *cd)
1321 if (!ice_is_vsi_valid(hw, vsi_handle))
1322 return ICE_ERR_PARAM;
1323 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1324 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1328 * ice_aq_get_vsi_params
1329 * @hw: pointer to the HW struct
1330 * @vsi_ctx: pointer to a VSI context struct
1331 * @cd: pointer to command details structure or NULL
1333 * Get VSI context info from hardware (0x0212)
1336 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1337 struct ice_sq_cd *cd)
1339 struct ice_aqc_add_get_update_free_vsi *cmd;
1340 struct ice_aqc_get_vsi_resp *resp;
1341 struct ice_aq_desc desc;
1342 enum ice_status status;
1344 cmd = &desc.params.vsi_cmd;
1345 resp = &desc.params.get_vsi_resp;
1347 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1349 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1351 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1352 sizeof(vsi_ctx->info), cd);
1354 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1356 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1357 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1364 * ice_aq_add_update_mir_rule - add/update a mirror rule
1365 * @hw: pointer to the HW struct
1366 * @rule_type: Rule Type
1367 * @dest_vsi: VSI number to which packets will be mirrored
1368 * @count: length of the list
1369 * @mr_buf: buffer for list of mirrored VSI numbers
1370 * @cd: pointer to command details structure or NULL
1373 * Add/Update Mirror Rule (0x260).
1376 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1377 u16 count, struct ice_mir_rule_buf *mr_buf,
1378 struct ice_sq_cd *cd, u16 *rule_id)
1380 struct ice_aqc_add_update_mir_rule *cmd;
1381 struct ice_aq_desc desc;
1382 enum ice_status status;
1383 __le16 *mr_list = NULL;
1386 switch (rule_type) {
1387 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1388 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1389 /* Make sure count and mr_buf are set for these rule_types */
1390 if (!(count && mr_buf))
1391 return ICE_ERR_PARAM;
1393 buf_size = count * sizeof(__le16);
1394 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1396 return ICE_ERR_NO_MEMORY;
1398 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1399 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1400 /* Make sure count and mr_buf are not set for these
1403 if (count || mr_buf)
1404 return ICE_ERR_PARAM;
1407 ice_debug(hw, ICE_DBG_SW,
1408 "Error due to unsupported rule_type %u\n", rule_type);
1409 return ICE_ERR_OUT_OF_RANGE;
1412 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1414 /* Pre-process 'mr_buf' items for add/update of virtual port
1415 * ingress/egress mirroring (but not physical port ingress/egress
1421 for (i = 0; i < count; i++) {
1424 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1426 /* Validate specified VSI number, make sure it is less
1427 * than ICE_MAX_VSI, if not return with error.
1429 if (id >= ICE_MAX_VSI) {
1430 ice_debug(hw, ICE_DBG_SW,
1431 "Error VSI index (%u) out-of-range\n",
1433 ice_free(hw, mr_list);
1434 return ICE_ERR_OUT_OF_RANGE;
1437 /* add VSI to mirror rule */
1440 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1441 else /* remove VSI from mirror rule */
1442 mr_list[i] = CPU_TO_LE16(id);
1446 cmd = &desc.params.add_update_rule;
1447 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1448 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1449 ICE_AQC_RULE_ID_VALID_M);
1450 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1451 cmd->num_entries = CPU_TO_LE16(count);
1452 cmd->dest = CPU_TO_LE16(dest_vsi);
1454 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1456 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1458 ice_free(hw, mr_list);
1464 * ice_aq_delete_mir_rule - delete a mirror rule
1465 * @hw: pointer to the HW struct
1466 * @rule_id: Mirror rule ID (to be deleted)
1467 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1468 * otherwise it is returned to the shared pool
1469 * @cd: pointer to command details structure or NULL
1471 * Delete Mirror Rule (0x261).
1474 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1475 struct ice_sq_cd *cd)
1477 struct ice_aqc_delete_mir_rule *cmd;
1478 struct ice_aq_desc desc;
1480 /* rule_id should be in the range 0...63 */
1481 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1482 return ICE_ERR_OUT_OF_RANGE;
1484 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1486 cmd = &desc.params.del_rule;
1487 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1488 cmd->rule_id = CPU_TO_LE16(rule_id);
1491 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1493 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1497 * ice_aq_alloc_free_vsi_list
1498 * @hw: pointer to the HW struct
1499 * @vsi_list_id: VSI list ID returned or used for lookup
1500 * @lkup_type: switch rule filter lookup type
1501 * @opc: switch rules population command type - pass in the command opcode
1503 * allocates or free a VSI list resource
1505 static enum ice_status
1506 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1507 enum ice_sw_lkup_type lkup_type,
1508 enum ice_adminq_opc opc)
1510 struct ice_aqc_alloc_free_res_elem *sw_buf;
1511 struct ice_aqc_res_elem *vsi_ele;
1512 enum ice_status status;
1515 buf_len = sizeof(*sw_buf);
1516 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1517 ice_malloc(hw, buf_len);
1519 return ICE_ERR_NO_MEMORY;
1520 sw_buf->num_elems = CPU_TO_LE16(1);
1522 if (lkup_type == ICE_SW_LKUP_MAC ||
1523 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1524 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1525 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1526 lkup_type == ICE_SW_LKUP_PROMISC ||
1527 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1528 lkup_type == ICE_SW_LKUP_LAST) {
1529 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1530 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1532 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1534 status = ICE_ERR_PARAM;
1535 goto ice_aq_alloc_free_vsi_list_exit;
1538 if (opc == ice_aqc_opc_free_res)
1539 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1541 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1543 goto ice_aq_alloc_free_vsi_list_exit;
1545 if (opc == ice_aqc_opc_alloc_res) {
1546 vsi_ele = &sw_buf->elem[0];
1547 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1550 ice_aq_alloc_free_vsi_list_exit:
1551 ice_free(hw, sw_buf);
1556 * ice_aq_set_storm_ctrl - Sets storm control configuration
1557 * @hw: pointer to the HW struct
1558 * @bcast_thresh: represents the upper threshold for broadcast storm control
1559 * @mcast_thresh: represents the upper threshold for multicast storm control
1560 * @ctl_bitmask: storm control control knobs
1562 * Sets the storm control configuration (0x0280)
1565 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1568 struct ice_aqc_storm_cfg *cmd;
1569 struct ice_aq_desc desc;
1571 cmd = &desc.params.storm_conf;
1573 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1575 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1576 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1577 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1579 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1583 * ice_aq_get_storm_ctrl - gets storm control configuration
1584 * @hw: pointer to the HW struct
1585 * @bcast_thresh: represents the upper threshold for broadcast storm control
1586 * @mcast_thresh: represents the upper threshold for multicast storm control
1587 * @ctl_bitmask: storm control control knobs
1589 * Gets the storm control configuration (0x0281)
1592 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1595 enum ice_status status;
1596 struct ice_aq_desc desc;
1598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1600 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1602 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1605 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1608 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1611 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1618 * ice_aq_sw_rules - add/update/remove switch rules
1619 * @hw: pointer to the HW struct
1620 * @rule_list: pointer to switch rule population list
1621 * @rule_list_sz: total size of the rule list in bytes
1622 * @num_rules: number of switch rules in the rule_list
1623 * @opc: switch rules population command type - pass in the command opcode
1624 * @cd: pointer to command details structure or NULL
1626 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1628 static enum ice_status
1629 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1630 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1632 struct ice_aq_desc desc;
1634 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1636 if (opc != ice_aqc_opc_add_sw_rules &&
1637 opc != ice_aqc_opc_update_sw_rules &&
1638 opc != ice_aqc_opc_remove_sw_rules)
1639 return ICE_ERR_PARAM;
1641 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1643 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1644 desc.params.sw_rules.num_rules_fltr_entry_index =
1645 CPU_TO_LE16(num_rules);
1646 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1650 * ice_aq_add_recipe - add switch recipe
1651 * @hw: pointer to the HW struct
1652 * @s_recipe_list: pointer to switch rule population list
1653 * @num_recipes: number of switch recipes in the list
1654 * @cd: pointer to command details structure or NULL
1659 ice_aq_add_recipe(struct ice_hw *hw,
1660 struct ice_aqc_recipe_data_elem *s_recipe_list,
1661 u16 num_recipes, struct ice_sq_cd *cd)
1663 struct ice_aqc_add_get_recipe *cmd;
1664 struct ice_aq_desc desc;
1667 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1668 cmd = &desc.params.add_get_recipe;
1669 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1671 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1672 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1674 buf_size = num_recipes * sizeof(*s_recipe_list);
1676 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1680 * ice_aq_get_recipe - get switch recipe
1681 * @hw: pointer to the HW struct
1682 * @s_recipe_list: pointer to switch rule population list
1683 * @num_recipes: pointer to the number of recipes (input and output)
1684 * @recipe_root: root recipe number of recipe(s) to retrieve
1685 * @cd: pointer to command details structure or NULL
1689 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1690 * On output, *num_recipes will equal the number of entries returned in
1693 * The caller must supply enough space in s_recipe_list to hold all possible
1694 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1697 ice_aq_get_recipe(struct ice_hw *hw,
1698 struct ice_aqc_recipe_data_elem *s_recipe_list,
1699 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1701 struct ice_aqc_add_get_recipe *cmd;
1702 struct ice_aq_desc desc;
1703 enum ice_status status;
1706 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1707 return ICE_ERR_PARAM;
1709 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1710 cmd = &desc.params.add_get_recipe;
1711 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1713 cmd->return_index = CPU_TO_LE16(recipe_root);
1714 cmd->num_sub_recipes = 0;
1716 buf_size = *num_recipes * sizeof(*s_recipe_list);
1718 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1719 /* cppcheck-suppress constArgument */
1720 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1726 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1727 * @hw: pointer to the HW struct
1728 * @profile_id: package profile ID to associate the recipe with
1729 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1730 * @cd: pointer to command details structure or NULL
1731 * Recipe to profile association (0x0291)
1734 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1735 struct ice_sq_cd *cd)
1737 struct ice_aqc_recipe_to_profile *cmd;
1738 struct ice_aq_desc desc;
1740 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1741 cmd = &desc.params.recipe_to_profile;
1742 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1743 cmd->profile_id = CPU_TO_LE16(profile_id);
1744 /* Set the recipe ID bit in the bitmask to let the device know which
1745 * profile we are associating the recipe to
1747 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1748 ICE_NONDMA_TO_NONDMA);
1750 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1754 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1755 * @hw: pointer to the HW struct
1756 * @profile_id: package profile ID to associate the recipe with
1757 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1758 * @cd: pointer to command details structure or NULL
1759 * Associate profile ID with given recipe (0x0293)
1762 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1763 struct ice_sq_cd *cd)
1765 struct ice_aqc_recipe_to_profile *cmd;
1766 struct ice_aq_desc desc;
1767 enum ice_status status;
1769 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1770 cmd = &desc.params.recipe_to_profile;
1771 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1772 cmd->profile_id = CPU_TO_LE16(profile_id);
1774 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1776 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1777 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1783 * ice_alloc_recipe - add recipe resource
1784 * @hw: pointer to the hardware structure
1785 * @rid: recipe ID returned as response to AQ call
1787 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1789 struct ice_aqc_alloc_free_res_elem *sw_buf;
1790 enum ice_status status;
1793 buf_len = sizeof(*sw_buf);
1794 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1796 return ICE_ERR_NO_MEMORY;
1798 sw_buf->num_elems = CPU_TO_LE16(1);
1799 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1800 ICE_AQC_RES_TYPE_S) |
1801 ICE_AQC_RES_TYPE_FLAG_SHARED);
1802 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1803 ice_aqc_opc_alloc_res, NULL);
1805 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1806 ice_free(hw, sw_buf);
1811 /* ice_init_port_info - Initialize port_info with switch configuration data
1812 * @pi: pointer to port_info
1813 * @vsi_port_num: VSI number or port number
1814 * @type: Type of switch element (port or VSI)
1815 * @swid: switch ID of the switch the element is attached to
1816 * @pf_vf_num: PF or VF number
1817 * @is_vf: true if the element is a VF, false otherwise
1820 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1821 u16 swid, u16 pf_vf_num, bool is_vf)
1824 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1825 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1827 pi->pf_vf_num = pf_vf_num;
1829 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1830 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1833 ice_debug(pi->hw, ICE_DBG_SW,
1834 "incorrect VSI/port type received\n");
1839 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1840 * @hw: pointer to the hardware structure
1842 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1844 struct ice_aqc_get_sw_cfg_resp *rbuf;
1845 enum ice_status status;
1852 num_total_ports = 1;
1854 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1855 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1858 return ICE_ERR_NO_MEMORY;
1860 /* Multiple calls to ice_aq_get_sw_cfg may be required
1861 * to get all the switch configuration information. The need
1862 * for additional calls is indicated by ice_aq_get_sw_cfg
1863 * writing a non-zero value in req_desc
1866 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1867 &req_desc, &num_elems, NULL);
1872 for (i = 0; i < num_elems; i++) {
1873 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1874 u16 pf_vf_num, swid, vsi_port_num;
1878 ele = rbuf[i].elements;
1879 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1880 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1882 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1883 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1885 swid = LE16_TO_CPU(ele->swid);
1887 if (LE16_TO_CPU(ele->pf_vf_num) &
1888 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1891 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1892 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1895 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1896 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1897 if (j == num_total_ports) {
1898 ice_debug(hw, ICE_DBG_SW,
1899 "more ports than expected\n");
1900 status = ICE_ERR_CFG;
1903 ice_init_port_info(hw->port_info,
1904 vsi_port_num, res_type, swid,
1912 } while (req_desc && !status);
1915 ice_free(hw, (void *)rbuf);
1920 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1921 * @hw: pointer to the hardware structure
1922 * @fi: filter info structure to fill/update
1924 * This helper function populates the lb_en and lan_en elements of the provided
1925 * ice_fltr_info struct using the switch's type and characteristics of the
1926 * switch rule being configured.
1928 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1932 if ((fi->flag & ICE_FLTR_TX) &&
1933 (fi->fltr_act == ICE_FWD_TO_VSI ||
1934 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1935 fi->fltr_act == ICE_FWD_TO_Q ||
1936 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1937 /* Setting LB for prune actions will result in replicated
1938 * packets to the internal switch that will be dropped.
1940 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1943 /* Set lan_en to TRUE if
1944 * 1. The switch is a VEB AND
1946 * 2.1 The lookup is a directional lookup like ethertype,
1947 * promiscuous, ethertype-MAC, promiscuous-VLAN
1948 * and default-port OR
1949 * 2.2 The lookup is VLAN, OR
1950 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1951 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1955 * The switch is a VEPA.
1957 * In all other cases, the LAN enable has to be set to false.
1960 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1961 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1962 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1963 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1964 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1965 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1966 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1967 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1968 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1969 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1978 * ice_fill_sw_rule - Helper function to fill switch rule structure
1979 * @hw: pointer to the hardware structure
1980 * @f_info: entry containing packet forwarding information
1981 * @s_rule: switch rule structure to be filled in based on mac_entry
1982 * @opc: switch rules population command type - pass in the command opcode
1985 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1986 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1988 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1996 if (opc == ice_aqc_opc_remove_sw_rules) {
1997 s_rule->pdata.lkup_tx_rx.act = 0;
1998 s_rule->pdata.lkup_tx_rx.index =
1999 CPU_TO_LE16(f_info->fltr_rule_id);
2000 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2004 eth_hdr_sz = sizeof(dummy_eth_header);
2005 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2007 /* initialize the ether header with a dummy header */
2008 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2009 ice_fill_sw_info(hw, f_info);
2011 switch (f_info->fltr_act) {
2012 case ICE_FWD_TO_VSI:
2013 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2014 ICE_SINGLE_ACT_VSI_ID_M;
2015 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2016 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2017 ICE_SINGLE_ACT_VALID_BIT;
2019 case ICE_FWD_TO_VSI_LIST:
2020 act |= ICE_SINGLE_ACT_VSI_LIST;
2021 act |= (f_info->fwd_id.vsi_list_id <<
2022 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2023 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2024 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2025 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2026 ICE_SINGLE_ACT_VALID_BIT;
2029 act |= ICE_SINGLE_ACT_TO_Q;
2030 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2031 ICE_SINGLE_ACT_Q_INDEX_M;
2033 case ICE_DROP_PACKET:
2034 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2035 ICE_SINGLE_ACT_VALID_BIT;
2037 case ICE_FWD_TO_QGRP:
2038 q_rgn = f_info->qgrp_size > 0 ?
2039 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2040 act |= ICE_SINGLE_ACT_TO_Q;
2041 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2042 ICE_SINGLE_ACT_Q_INDEX_M;
2043 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2044 ICE_SINGLE_ACT_Q_REGION_M;
2051 act |= ICE_SINGLE_ACT_LB_ENABLE;
2053 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2055 switch (f_info->lkup_type) {
2056 case ICE_SW_LKUP_MAC:
2057 daddr = f_info->l_data.mac.mac_addr;
2059 case ICE_SW_LKUP_VLAN:
2060 vlan_id = f_info->l_data.vlan.vlan_id;
2061 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2062 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2063 act |= ICE_SINGLE_ACT_PRUNE;
2064 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2067 case ICE_SW_LKUP_ETHERTYPE_MAC:
2068 daddr = f_info->l_data.ethertype_mac.mac_addr;
2070 case ICE_SW_LKUP_ETHERTYPE:
2071 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2072 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2074 case ICE_SW_LKUP_MAC_VLAN:
2075 daddr = f_info->l_data.mac_vlan.mac_addr;
2076 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2078 case ICE_SW_LKUP_PROMISC_VLAN:
2079 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2081 case ICE_SW_LKUP_PROMISC:
2082 daddr = f_info->l_data.mac_vlan.mac_addr;
2088 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2089 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2090 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2092 /* Recipe set depending on lookup type */
2093 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2094 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2095 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2098 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2099 ICE_NONDMA_TO_NONDMA);
2101 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2102 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2103 *off = CPU_TO_BE16(vlan_id);
2106 /* Create the switch rule with the final dummy Ethernet header */
2107 if (opc != ice_aqc_opc_update_sw_rules)
2108 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2112 * ice_add_marker_act
2113 * @hw: pointer to the hardware structure
2114 * @m_ent: the management entry for which sw marker needs to be added
2115 * @sw_marker: sw marker to tag the Rx descriptor with
2116 * @l_id: large action resource ID
2118 * Create a large action to hold software marker and update the switch rule
2119 * entry pointed by m_ent with newly created large action
2121 static enum ice_status
2122 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2123 u16 sw_marker, u16 l_id)
2125 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2126 /* For software marker we need 3 large actions
2127 * 1. FWD action: FWD TO VSI or VSI LIST
2128 * 2. GENERIC VALUE action to hold the profile ID
2129 * 3. GENERIC VALUE action to hold the software marker ID
2131 const u16 num_lg_acts = 3;
2132 enum ice_status status;
2138 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2139 return ICE_ERR_PARAM;
2141 /* Create two back-to-back switch rules and submit them to the HW using
2142 * one memory buffer:
2146 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2147 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2148 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2150 return ICE_ERR_NO_MEMORY;
2152 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2154 /* Fill in the first switch rule i.e. large action */
2155 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2156 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2157 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2159 /* First action VSI forwarding or VSI list forwarding depending on how
2162 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2163 m_ent->fltr_info.fwd_id.hw_vsi_id;
2165 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2166 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2167 ICE_LG_ACT_VSI_LIST_ID_M;
2168 if (m_ent->vsi_count > 1)
2169 act |= ICE_LG_ACT_VSI_LIST;
2170 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2172 /* Second action descriptor type */
2173 act = ICE_LG_ACT_GENERIC;
2175 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2176 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2178 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2179 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2181 /* Third action Marker value */
2182 act |= ICE_LG_ACT_GENERIC;
2183 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2184 ICE_LG_ACT_GENERIC_VALUE_M;
2186 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2188 /* call the fill switch rule to fill the lookup Tx Rx structure */
2189 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2190 ice_aqc_opc_update_sw_rules);
2192 /* Update the action to point to the large action ID */
2193 rx_tx->pdata.lkup_tx_rx.act =
2194 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2195 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2196 ICE_SINGLE_ACT_PTR_VAL_M));
2198 /* Use the filter rule ID of the previously created rule with single
2199 * act. Once the update happens, hardware will treat this as large
2202 rx_tx->pdata.lkup_tx_rx.index =
2203 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2205 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2206 ice_aqc_opc_update_sw_rules, NULL);
2208 m_ent->lg_act_idx = l_id;
2209 m_ent->sw_marker_id = sw_marker;
2212 ice_free(hw, lg_act);
/* NOTE(review): this listing is line-sampled; local declarations (lg_act_size,
 * rules_size, act, id, f_rule_id) and some braces are not visible here.
 */
2217 * ice_add_counter_act - add/update filter rule with counter action
2218 * @hw: pointer to the hardware structure
2219 * @m_ent: the management entry for which counter needs to be added
2220 * @counter_id: VLAN counter ID returned as part of allocate resource
2221 * @l_id: large action resource ID
/* Builds a two-action large-action rule (VSI/VSI-list forward + statistics
 * counter) and rewrites the existing lookup rule to point at it, submitting
 * both rules to firmware in a single admin-queue call.
 */
2223 static enum ice_status
2224 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2225 u16 counter_id, u16 l_id)
2227 struct ice_aqc_sw_rules_elem *lg_act;
2228 struct ice_aqc_sw_rules_elem *rx_tx;
2229 enum ice_status status;
2230 /* 2 actions will be added while adding a large action counter */
2231 const int num_acts = 2;
/* Counter actions are only supported on MAC lookup rules. */
2238 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2239 return ICE_ERR_PARAM;
2241 /* Create two back-to-back switch rules and submit them to the HW using
2242 * one memory buffer:
2246 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2247 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2248 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2251 return ICE_ERR_NO_MEMORY;
/* The lookup (rx_tx) rule lives immediately after the large action
 * inside the same buffer.
 */
2253 rx_tx = (struct ice_aqc_sw_rules_elem *)
2254 ((u8 *)lg_act + lg_act_size);
2256 /* Fill in the first switch rule i.e. large action */
2257 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2258 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2259 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2261 /* First action VSI forwarding or VSI list forwarding depending on how
/* Forward to a VSI list when more than one VSI subscribes to this filter. */
2264 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2265 m_ent->fltr_info.fwd_id.hw_vsi_id;
2267 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2268 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2269 ICE_LG_ACT_VSI_LIST_ID_M;
2270 if (m_ent->vsi_count > 1)
2271 act |= ICE_LG_ACT_VSI_LIST;
2272 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2274 /* Second action counter ID */
2275 act = ICE_LG_ACT_STAT_COUNT;
2276 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2277 ICE_LG_ACT_STAT_COUNT_M;
2278 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2280 /* call the fill switch rule to fill the lookup Tx Rx structure */
2281 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2282 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule's single action at the large-action index. */
2284 act = ICE_SINGLE_ACT_PTR;
2285 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2286 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2288 /* Use the filter rule ID of the previously created rule with single
2289 * act. Once the update happens, hardware will treat this as large
2292 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2293 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call. */
2295 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2296 ice_aqc_opc_update_sw_rules, NULL);
/* Record the large-action index and counter on the management entry. */
2298 m_ent->lg_act_idx = l_id;
2299 m_ent->counter_index = counter_id;
2302 ice_free(hw, lg_act);
2307 * ice_create_vsi_list_map
2308 * @hw: pointer to the hardware structure
2309 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2310 * @num_vsi: number of VSI handles in the array
2311 * @vsi_list_id: VSI list ID generated as part of allocate resource
2313 * Helper function to create a new entry of VSI list ID to VSI mapping
2314 * using the given VSI list ID
2316 static struct ice_vsi_list_map_info *
2317 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2320 struct ice_switch_info *sw = hw->switch_info;
2321 struct ice_vsi_list_map_info *v_map;
/* NOTE(review): the allocation-failure check is not visible in this sampled
 * listing; presumably a NULL ice_calloc result is returned to the caller.
 */
2324 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2329 v_map->vsi_list_id = vsi_list_id;
/* Set one bit per VSI handle in the map's bitmap. */
2331 for (i = 0; i < num_vsi; i++)
2332 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch's global VSI-list map list. */
2334 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2339 * ice_update_vsi_list_rule
2340 * @hw: pointer to the hardware structure
2341 * @vsi_handle_arr: array of VSI handles to form a VSI list
2342 * @num_vsi: number of VSI handles in the array
2343 * @vsi_list_id: VSI list ID generated as part of allocate resource
2344 * @remove: Boolean value to indicate if this is a remove action
2345 * @opc: switch rules population command type - pass in the command opcode
2346 * @lkup_type: lookup type of the filter
2348 * Call AQ command to add a new switch rule or update existing switch rule
2349 * using the given VSI list ID
2351 static enum ice_status
2352 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2353 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2354 enum ice_sw_lkup_type lkup_type)
2356 struct ice_aqc_sw_rules_elem *s_rule;
2357 enum ice_status status;
/* NOTE(review): the guard condition preceding this return is not visible in
 * this sampled listing (likely a !num_vsi check).
 */
2363 return ICE_ERR_PARAM;
/* Choose the rule type: VSI-list set/clear for MAC/ethertype/promisc
 * lookups, prune-list set/clear for VLAN, reject anything else.
 */
2365 if (lkup_type == ICE_SW_LKUP_MAC ||
2366 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2367 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2368 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2369 lkup_type == ICE_SW_LKUP_PROMISC ||
2370 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2371 lkup_type == ICE_SW_LKUP_LAST)
2372 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2373 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2374 else if (lkup_type == ICE_SW_LKUP_VLAN)
2375 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2376 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2378 return ICE_ERR_PARAM;
2380 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2381 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2383 return ICE_ERR_NO_MEMORY;
/* Validate each handle and translate it to the HW VSI number the AQ
 * command requires.
 */
2384 for (i = 0; i < num_vsi; i++) {
2385 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2386 status = ICE_ERR_PARAM;
2389 /* AQ call requires hw_vsi_id(s) */
2390 s_rule->pdata.vsi_list.vsi[i] =
2391 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2394 s_rule->type = CPU_TO_LE16(rule_type);
2395 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2396 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2398 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2401 ice_free(hw, s_rule);
2406 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2407 * @hw: pointer to the HW struct
2408 * @vsi_handle_arr: array of VSI handles to form a VSI list
2409 * @num_vsi: number of VSI handles in the array
2410 * @vsi_list_id: stores the ID of the VSI list to be created
2411 * @lkup_type: switch rule filter's lookup type
2413 static enum ice_status
2414 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2415 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2417 enum ice_status status;
/* Allocate a VSI list resource ID from firmware first. */
2419 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2420 ice_aqc_opc_alloc_res);
2424 /* Update the newly created VSI list to include the specified VSIs */
2425 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2426 *vsi_list_id, false,
2427 ice_aqc_opc_add_sw_rules, lkup_type);
2431 * ice_create_pkt_fwd_rule
2432 * @hw: pointer to the hardware structure
2433 * @recp_list: corresponding filter management list
2434 * @f_entry: entry containing packet forwarding information
2436 * Create switch rule with given filter information and add an entry
2437 * to the corresponding filter management list to track this switch rule
2440 static enum ice_status
2441 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2442 struct ice_fltr_list_entry *f_entry)
2444 struct ice_fltr_mgmt_list_entry *fm_entry;
2445 struct ice_aqc_sw_rules_elem *s_rule;
2446 enum ice_status status;
2448 s_rule = (struct ice_aqc_sw_rules_elem *)
2449 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2451 return ICE_ERR_NO_MEMORY;
2452 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2453 ice_malloc(hw, sizeof(*fm_entry));
2455 status = ICE_ERR_NO_MEMORY;
2456 goto ice_create_pkt_fwd_rule_exit;
2459 fm_entry->fltr_info = f_entry->fltr_info;
2461 /* Initialize all the fields for the management entry */
2462 fm_entry->vsi_count = 1;
2463 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2464 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2465 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2467 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2468 ice_aqc_opc_add_sw_rules);
2470 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2471 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure (check not visible in this sampled listing) free the
 * tracking entry and bail out.
 */
2473 ice_free(hw, fm_entry);
2474 goto ice_create_pkt_fwd_rule_exit;
/* Propagate the FW-assigned rule ID to both the caller's entry and the
 * book-keeping entry.
 */
2477 f_entry->fltr_info.fltr_rule_id =
2478 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2479 fm_entry->fltr_info.fltr_rule_id =
2480 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2482 /* The book keeping entries will get removed when base driver
2483 * calls remove filter AQ command
2485 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2487 ice_create_pkt_fwd_rule_exit:
2488 ice_free(hw, s_rule);
2493 * ice_update_pkt_fwd_rule
2494 * @hw: pointer to the hardware structure
2495 * @f_info: filter information for switch rule
2497 * Call AQ command to update a previously created switch rule with a
2500 static enum ice_status
2501 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2503 struct ice_aqc_sw_rules_elem *s_rule;
2504 enum ice_status status;
2506 s_rule = (struct ice_aqc_sw_rules_elem *)
2507 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2509 return ICE_ERR_NO_MEMORY;
2511 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned rule ID. */
2513 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2515 /* Update switch rule with new rule set to forward VSI list */
2516 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2517 ice_aqc_opc_update_sw_rules, NULL);
2519 ice_free(hw, s_rule);
2524 * ice_update_sw_rule_bridge_mode
2525 * @hw: pointer to the HW struct
2527 * Updates unicast switch filter rules based on VEB/VEPA mode
2529 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2531 struct ice_switch_info *sw = hw->switch_info;
2532 struct ice_fltr_mgmt_list_entry *fm_entry;
2533 enum ice_status status = ICE_SUCCESS;
2534 struct LIST_HEAD_TYPE *rule_head;
2535 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2537 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2538 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Walk every tracked MAC rule under the rule lock. */
2540 ice_acquire_lock(rule_lock);
2541 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2543 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2544 u8 *addr = fi->l_data.mac.mac_addr;
2546 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action are
 * re-programmed for the new bridge mode.
 */
2549 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2550 (fi->fltr_act == ICE_FWD_TO_VSI ||
2551 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2552 fi->fltr_act == ICE_FWD_TO_Q ||
2553 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2554 status = ice_update_pkt_fwd_rule(hw, fi);
2560 ice_release_lock(rule_lock);
2566 * ice_add_update_vsi_list
2567 * @hw: pointer to the hardware structure
2568 * @m_entry: pointer to current filter management list entry
2569 * @cur_fltr: filter information from the book keeping entry
2570 * @new_fltr: filter information with the new VSI to be added
2572 * Call AQ command to add or update previously created VSI list with new VSI.
2574 * Helper function to do book keeping associated with adding filter information
2575 * The algorithm to do the book keeping is described below :
2576 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2577 * if only one VSI has been added till now
2578 * Allocate a new VSI list and add two VSIs
2579 * to this list using switch rule command
2580 * Update the previously created switch rule with the
2581 * newly created VSI list ID
2582 * if a VSI list was previously created
2583 * Add the new VSI to the previously created VSI list set
2584 * using the update switch rule command
2586 static enum ice_status
2587 ice_add_update_vsi_list(struct ice_hw *hw,
2588 struct ice_fltr_mgmt_list_entry *m_entry,
2589 struct ice_fltr_info *cur_fltr,
2590 struct ice_fltr_info *new_fltr)
2592 enum ice_status status = ICE_SUCCESS;
2593 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be combined with VSI lists. */
2595 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2596 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2597 return ICE_ERR_NOT_IMPL;
2599 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2600 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2601 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2602 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2603 return ICE_ERR_NOT_IMPL;
2605 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2606 /* Only one entry existed in the mapping and it was not already
2607 * a part of a VSI list. So, create a VSI list with the old and
2610 struct ice_fltr_info tmp_fltr;
2611 u16 vsi_handle_arr[2];
2613 /* A rule already exists with the new VSI being added */
2614 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2615 return ICE_ERR_ALREADY_EXISTS;
2617 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2618 vsi_handle_arr[1] = new_fltr->vsi_handle;
2619 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2621 new_fltr->lkup_type);
2625 tmp_fltr = *new_fltr;
2626 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2627 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2628 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2629 /* Update the previous switch rule of "MAC forward to VSI" to
2630 * "MAC fwd to VSI list"
2632 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the tracked rule now forwards to the new list. */
2636 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2637 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2638 m_entry->vsi_list_info =
2639 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2642 /* If this entry was large action then the large action needs
2643 * to be updated to point to FWD to VSI list
2645 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2647 ice_add_marker_act(hw, m_entry,
2648 m_entry->sw_marker_id,
2649 m_entry->lg_act_idx);
/* else branch (brace not visible in this sampled listing): a VSI list
 * already exists — append the single new VSI to it.
 */
2651 u16 vsi_handle = new_fltr->vsi_handle;
2652 enum ice_adminq_opc opcode;
2654 if (!m_entry->vsi_list_info)
2657 /* A rule already exists with the new VSI being added */
2658 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2661 /* Update the previously created VSI list set with
2662 * the new VSI ID passed in
2664 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2665 opcode = ice_aqc_opc_update_sw_rules;
2667 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2668 vsi_list_id, false, opcode,
2669 new_fltr->lkup_type);
2670 /* update VSI list mapping info with new VSI ID */
2672 ice_set_bit(vsi_handle,
2673 m_entry->vsi_list_info->vsi_map);
2676 m_entry->vsi_count++;
2681 * ice_find_rule_entry - Search a rule entry
2682 * @list_head: head of rule list
2683 * @f_info: rule information
2685 * Helper function to search for a given rule entry
2686 * Returns pointer to entry storing the rule if found
2688 static struct ice_fltr_mgmt_list_entry *
2689 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2690 struct ice_fltr_info *f_info)
2692 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on the lookup data (MAC/VLAN/etc.) plus the direction flag. */
2694 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2696 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2697 sizeof(f_info->l_data)) &&
2698 f_info->flag == list_itr->fltr_info.flag) {
2707 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2708 * @recp_list: VSI lists needs to be searched
2709 * @vsi_handle: VSI handle to be found in VSI list
2710 * @vsi_list_id: VSI list ID found containing vsi_handle
2712 * Helper function to search a VSI list with single entry containing given VSI
2713 * handle element. This can be extended further to search VSI list with more
2714 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2716 static struct ice_vsi_list_map_info *
2717 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2720 struct ice_vsi_list_map_info *map_info = NULL;
2721 struct LIST_HEAD_TYPE *list_head;
2723 list_head = &recp_list->filt_rules;
/* Advanced and basic recipes keep different entry types on the same
 * list head, so each needs its own iteration.
 */
2724 if (recp_list->adv_rule) {
2725 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2727 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2728 ice_adv_fltr_mgmt_list_entry,
2730 if (list_itr->vsi_list_info) {
2731 map_info = list_itr->vsi_list_info;
2732 if (ice_is_bit_set(map_info->vsi_map,
2734 *vsi_list_id = map_info->vsi_list_id;
2740 struct ice_fltr_mgmt_list_entry *list_itr;
2742 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2743 ice_fltr_mgmt_list_entry,
/* Basic rules only reuse lists that contain exactly one VSI. */
2745 if (list_itr->vsi_count == 1 &&
2746 list_itr->vsi_list_info) {
2747 map_info = list_itr->vsi_list_info;
2748 if (ice_is_bit_set(map_info->vsi_map,
2750 *vsi_list_id = map_info->vsi_list_id;
2760 * ice_add_rule_internal - add rule for a given lookup type
2761 * @hw: pointer to the hardware structure
2762 * @recp_list: recipe list for which rule has to be added
2763 * @lport: logic port number on which function add rule
2764 * @f_entry: structure containing MAC forwarding information
2766 * Adds or updates the rule lists for a given recipe
2768 static enum ice_status
2769 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2770 u8 lport, struct ice_fltr_list_entry *f_entry)
2772 struct ice_fltr_info *new_fltr, *cur_fltr;
2773 struct ice_fltr_mgmt_list_entry *m_entry;
2774 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2775 enum ice_status status = ICE_SUCCESS;
2777 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2778 return ICE_ERR_PARAM;
2780 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2781 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2782 f_entry->fltr_info.fwd_id.hw_vsi_id =
2783 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2785 rule_lock = &recp_list->filt_rule_lock;
2787 ice_acquire_lock(rule_lock);
2788 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced by the logical port, Tx rules by the HW VSI. */
2789 if (new_fltr->flag & ICE_FLTR_RX)
2790 new_fltr->src = lport;
2791 else if (new_fltr->flag & ICE_FLTR_TX)
2793 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* Create a fresh rule if no matching entry exists; otherwise fold the
 * new VSI into the existing rule's VSI list.
 */
2795 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2797 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2798 goto exit_add_rule_internal;
2801 cur_fltr = &m_entry->fltr_info;
2802 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2804 exit_add_rule_internal:
2805 ice_release_lock(rule_lock);
2810 * ice_remove_vsi_list_rule
2811 * @hw: pointer to the hardware structure
2812 * @vsi_list_id: VSI list ID generated as part of allocate resource
2813 * @lkup_type: switch rule filter lookup type
2815 * The VSI list should be emptied before this function is called to remove the
2818 static enum ice_status
2819 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2820 enum ice_sw_lkup_type lkup_type)
2822 struct ice_aqc_sw_rules_elem *s_rule;
2823 enum ice_status status;
2826 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2827 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2829 return ICE_ERR_NO_MEMORY;
2831 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2832 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2834 /* Free the vsi_list resource that we allocated. It is assumed that the
2835 * list is empty at this point.
2837 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2838 ice_aqc_opc_free_res);
2840 ice_free(hw, s_rule);
2845 * ice_rem_update_vsi_list
2846 * @hw: pointer to the hardware structure
2847 * @vsi_handle: VSI handle of the VSI to remove
2848 * @fm_list: filter management entry for which the VSI list management needs to
/* Removes one VSI from a rule's VSI list; if the list shrinks to a single
 * member (non-VLAN) the rule is converted back to plain FWD_TO_VSI and the
 * now-unused VSI list resource is released.
 */
2851 static enum ice_status
2852 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2853 struct ice_fltr_mgmt_list_entry *fm_list)
2855 enum ice_sw_lkup_type lkup_type;
2856 enum ice_status status = ICE_SUCCESS;
2859 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2860 fm_list->vsi_count == 0)
2861 return ICE_ERR_PARAM;
2863 /* A rule with the VSI being removed does not exist */
2864 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2865 return ICE_ERR_DOES_NOT_EXIST;
2867 lkup_type = fm_list->fltr_info.lkup_type;
2868 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2869 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2870 ice_aqc_opc_update_sw_rules,
/* Book-keeping: drop the VSI from the count and the bitmap. */
2875 fm_list->vsi_count--;
2876 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2878 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2879 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2880 struct ice_vsi_list_map_info *vsi_list_info =
2881 fm_list->vsi_list_info;
/* Identify the single remaining VSI in the list. */
2884 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2886 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2887 return ICE_ERR_OUT_OF_RANGE;
2889 /* Make sure VSI list is empty before removing it below */
2890 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2892 ice_aqc_opc_update_sw_rules,
/* Rewrite the rule to forward directly to the remaining VSI. */
2897 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2898 tmp_fltr_info.fwd_id.hw_vsi_id =
2899 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2900 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2901 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2903 ice_debug(hw, ICE_DBG_SW,
2904 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2905 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2909 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are torn down only once fully empty; other lookups once a
 * single VSI remains (it was converted to FWD_TO_VSI above).
 */
2912 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2913 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2914 struct ice_vsi_list_map_info *vsi_list_info =
2915 fm_list->vsi_list_info;
2917 /* Remove the VSI list since it is no longer used */
2918 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2920 ice_debug(hw, ICE_DBG_SW,
2921 "Failed to remove VSI list %d, error %d\n",
2922 vsi_list_id, status);
2926 LIST_DEL(&vsi_list_info->list_entry);
2927 ice_free(hw, vsi_list_info);
2928 fm_list->vsi_list_info = NULL;
2935 * ice_remove_rule_internal - Remove a filter rule of a given type
2937 * @hw: pointer to the hardware structure
2938 * @recp_list: recipe list for which the rule needs to removed
2939 * @f_entry: rule entry containing filter information
2941 static enum ice_status
2942 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2943 struct ice_fltr_list_entry *f_entry)
2945 struct ice_fltr_mgmt_list_entry *list_elem;
2946 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2947 enum ice_status status = ICE_SUCCESS;
2948 bool remove_rule = false;
2951 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2952 return ICE_ERR_PARAM;
2953 f_entry->fltr_info.fwd_id.hw_vsi_id =
2954 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2956 rule_lock = &recp_list->filt_rule_lock;
2957 ice_acquire_lock(rule_lock);
2958 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2959 &f_entry->fltr_info);
2961 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, or only the VSI
 * list membership updated.
 */
2965 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2967 } else if (!list_elem->vsi_list_info) {
2968 status = ICE_ERR_DOES_NOT_EXIST;
2970 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2971 /* a ref_cnt > 1 indicates that the vsi_list is being
2972 * shared by multiple rules. Decrement the ref_cnt and
2973 * remove this rule, but do not modify the list, as it
2974 * is in-use by other rules.
2976 list_elem->vsi_list_info->ref_cnt--;
2979 /* a ref_cnt of 1 indicates the vsi_list is only used
2980 * by one rule. However, the original removal request is only
2981 * for a single VSI. Update the vsi_list first, and only
2982 * remove the rule if there are no further VSIs in this list.
2984 vsi_handle = f_entry->fltr_info.vsi_handle;
2985 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2988 /* if VSI count goes to zero after updating the VSI list */
2989 if (list_elem->vsi_count == 0)
2994 /* Remove the lookup rule */
2995 struct ice_aqc_sw_rules_elem *s_rule;
2997 s_rule = (struct ice_aqc_sw_rules_elem *)
2998 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3000 status = ICE_ERR_NO_MEMORY;
3004 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3005 ice_aqc_opc_remove_sw_rules);
3007 status = ice_aq_sw_rules(hw, s_rule,
3008 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3009 ice_aqc_opc_remove_sw_rules, NULL);
3011 /* Remove a book keeping from the list */
3012 ice_free(hw, s_rule);
3017 LIST_DEL(&list_elem->list_entry);
3018 ice_free(hw, list_elem);
3021 ice_release_lock(rule_lock);
3026 * ice_aq_get_res_alloc - get allocated resources
3027 * @hw: pointer to the HW struct
3028 * @num_entries: pointer to u16 to store the number of resource entries returned
3029 * @buf: pointer to user-supplied buffer
3030 * @buf_size: size of buff
3031 * @cd: pointer to command details structure or NULL
3033 * The user-supplied buffer must be large enough to store the resource
3034 * information for all resource types. Each resource type is an
3035 * ice_aqc_get_res_resp_data_elem structure.
3038 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3039 u16 buf_size, struct ice_sq_cd *cd)
3041 struct ice_aqc_get_res_alloc *resp;
3042 enum ice_status status;
3043 struct ice_aq_desc desc;
3046 return ICE_ERR_BAD_PTR;
3048 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3049 return ICE_ERR_INVAL_SIZE;
3051 resp = &desc.params.get_res;
3053 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3054 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional output; only filled on success. */
3056 if (!status && num_entries)
3057 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3063 * ice_aq_get_res_descs - get allocated resource descriptors
3064 * @hw: pointer to the hardware structure
3065 * @num_entries: number of resource entries in buffer
3066 * @buf: Indirect buffer to hold data parameters and response
3067 * @buf_size: size of buffer for indirect commands
3068 * @res_type: resource type
3069 * @res_shared: is resource shared
3070 * @desc_id: input - first desc ID to start; output - next desc ID
3071 * @cd: pointer to command details structure or NULL
3074 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3075 struct ice_aqc_get_allocd_res_desc_resp *buf,
3076 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3077 struct ice_sq_cd *cd)
3079 struct ice_aqc_get_allocd_res_desc *cmd;
3080 struct ice_aq_desc desc;
3081 enum ice_status status;
3083 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3085 cmd = &desc.params.get_res_desc;
3088 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
3090 if (buf_size != (num_entries * sizeof(*buf)))
3091 return ICE_ERR_PARAM;
3093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag into one field. */
3095 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3096 ICE_AQC_RES_TYPE_M) | (res_shared ?
3097 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3098 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3100 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the continuation cursor for the next call. */
3102 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3108 * ice_add_mac_rule - Add a MAC address based filter rule
3109 * @hw: pointer to the hardware structure
3110 * @m_list: list of MAC addresses and forwarding information
3111 * @sw: pointer to switch info struct for which function add rule
3112 * @lport: logic port number on which function add rule
3114 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3115 * multiple unicast addresses, the function assumes that all the
3116 * addresses are unique in a given add_mac call. It doesn't
3117 * check for duplicates in this case, removing duplicates from a given
3118 * list should be taken care of in the caller of this function.
3120 static enum ice_status
3121 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3122 struct ice_switch_info *sw, u8 lport)
3124 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3125 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3126 struct ice_fltr_list_entry *m_list_itr;
3127 struct LIST_HEAD_TYPE *rule_head;
3128 u16 total_elem_left, s_rule_size;
3129 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3130 enum ice_status status = ICE_SUCCESS;
3131 u16 num_unicast = 0;
3135 rule_lock = &recp_list->filt_rule_lock;
3136 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry; multicast (and shared unicast) entries
 * are added individually, exclusive unicast entries are counted for a
 * bulk add below.
 */
3138 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3140 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3144 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3145 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3146 if (!ice_is_vsi_valid(hw, vsi_handle))
3147 return ICE_ERR_PARAM;
3148 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3149 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3150 /* update the src in case it is VSI num */
3151 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3152 return ICE_ERR_PARAM;
3153 m_list_itr->fltr_info.src = hw_vsi_id;
3154 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3155 IS_ZERO_ETHER_ADDR(add))
3156 return ICE_ERR_PARAM;
3157 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3158 /* Don't overwrite the unicast address */
3159 ice_acquire_lock(rule_lock);
3160 if (ice_find_rule_entry(rule_head,
3161 &m_list_itr->fltr_info)) {
3162 ice_release_lock(rule_lock);
3163 return ICE_ERR_ALREADY_EXISTS;
3165 ice_release_lock(rule_lock);
3167 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3168 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3169 m_list_itr->status =
3170 ice_add_rule_internal(hw, recp_list, lport,
3172 if (m_list_itr->status)
3173 return m_list_itr->status;
3177 ice_acquire_lock(rule_lock);
3178 /* Exit if no suitable entries were found for adding bulk switch rule */
3180 status = ICE_SUCCESS;
3181 goto ice_add_mac_exit;
3184 /* Allocate switch rule buffer for the bulk update for unicast */
3185 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3186 s_rule = (struct ice_aqc_sw_rules_elem *)
3187 ice_calloc(hw, num_unicast, s_rule_size);
3189 status = ICE_ERR_NO_MEMORY;
3190 goto ice_add_mac_exit;
/* Pass 2: fill one switch rule per unicast address into the buffer. */
3194 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3196 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3197 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3199 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3200 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3201 ice_aqc_opc_add_sw_rules);
3202 r_iter = (struct ice_aqc_sw_rules_elem *)
3203 ((u8 *)r_iter + s_rule_size);
3207 /* Call AQ bulk switch rule update for all unicast addresses */
3209 /* Call AQ switch rule in AQ_MAX chunk */
3210 for (total_elem_left = num_unicast; total_elem_left > 0;
3211 total_elem_left -= elem_sent) {
3212 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Keep each AQ submission within the max AQ buffer length. */
3214 elem_sent = MIN_T(u8, total_elem_left,
3215 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3216 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3217 elem_sent, ice_aqc_opc_add_sw_rules,
3220 goto ice_add_mac_exit;
3221 r_iter = (struct ice_aqc_sw_rules_elem *)
3222 ((u8 *)r_iter + (elem_sent * s_rule_size));
3225 /* Fill up rule ID based on the value returned from FW */
3227 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3229 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3230 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3231 struct ice_fltr_mgmt_list_entry *fm_entry;
3233 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3234 f_info->fltr_rule_id =
3235 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3236 f_info->fltr_act = ICE_FWD_TO_VSI;
3237 /* Create an entry to track this MAC address */
3238 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3239 ice_malloc(hw, sizeof(*fm_entry));
3241 status = ICE_ERR_NO_MEMORY;
3242 goto ice_add_mac_exit;
3244 fm_entry->fltr_info = *f_info;
3245 fm_entry->vsi_count = 1;
3246 /* The book keeping entries will get removed when
3247 * base driver calls remove filter AQ command
3250 LIST_ADD(&fm_entry->list_entry, rule_head);
3251 r_iter = (struct ice_aqc_sw_rules_elem *)
3252 ((u8 *)r_iter + s_rule_size);
3257 ice_release_lock(rule_lock);
3259 ice_free(hw, s_rule);
3264 * ice_add_mac - Add a MAC address based filter rule
3265 * @hw: pointer to the hardware structure
3266 * @m_list: list of MAC addresses and forwarding information
3268 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: validates inputs (guard condition not visible in this
 * sampled listing) then delegates to ice_add_mac_rule with the HW's switch
 * info and logical port.
 */
3271 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3274 return ICE_ERR_PARAM;
3276 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3277 hw->port_info->lport);
3281 * ice_add_vlan_internal - Add one VLAN based filter rule
3282 * @hw: pointer to the hardware structure
3283 * @recp_list: recipe list for which rule has to be added
3284 * @f_entry: filter entry containing one VLAN information
/* Add or update a single VLAN filter rule. All VLAN (pruning) rules forward
 * through a VSI list; an existing list is reused (ref-counted) when the same
 * VLAN is already programmed, otherwise a new VSI list rule is created.
 *
 * NOTE(review): several enclosing if/else and continuation lines of this
 * function are not visible in this extraction; the nesting shown below is
 * partial — confirm against the complete source before modifying.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the software VSI handle to the hardware VSI number */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	/* VLAN filter rules must be sourced from a VSI */
	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	struct ice_vsi_list_map_info *map_info = NULL;

	if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
		/* All VLAN pruning rules use a VSI list. Check if
		 * there is already a VSI list containing VSI that we
		 * want to add. If found, use the same vsi_list_id for
		 * this new VLAN rule or else create a new list.
		 */
		map_info = ice_find_vsi_list_entry(recp_list,
		status = ice_create_vsi_list_rule(hw,
		/* Convert the action to forwarding to a VSI list. */
		new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		new_fltr->fwd_id.vsi_list_id = vsi_list_id;
	status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
	status = ICE_ERR_DOES_NOT_EXIST;
	/* reuse VSI list for new rule and increment ref_cnt */
	v_list_itr->vsi_list_info = map_info;
	map_info->ref_cnt++;
	v_list_itr->vsi_list_info =
		ice_create_vsi_list_map(hw, &vsi_handle,
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by a single VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
	/* If VLAN rule exists and VSI list being used by this rule is
	 * referenced by more than 1 VLAN rule. Then create a new VSI
	 * list appending previous VSI with new VSI and update existing
	 * VLAN rule to point to new VSI list ID
	 */
	struct ice_fltr_info tmp_fltr;
	u16 vsi_handle_arr[2];

	/* Current implementation only supports reusing VSI list with
	 * one VSI count. We should never hit below condition
	 */
	if (v_list_itr->vsi_count > 1 &&
	    v_list_itr->vsi_list_info->ref_cnt > 1) {
		ice_debug(hw, ICE_DBG_SW,
			  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
		status = ICE_ERR_CFG;
	/* cur_handle: the single VSI currently on the list — declaration of
	 * cur_handle is not visible in this extraction.
	 */
	ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
	/* A rule already exists with the new VSI being added */
	if (cur_handle == vsi_handle) {
		status = ICE_ERR_ALREADY_EXISTS;
	vsi_handle_arr[0] = cur_handle;
	vsi_handle_arr[1] = vsi_handle;
	status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
					  &vsi_list_id, lkup_type);
	tmp_fltr = v_list_itr->fltr_info;
	tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
	tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
	tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
	/* Update the previous switch rule to a new VSI list which
	 * includes current VSI that is requested
	 */
	status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
	/* before overriding VSI list map info. decrement ref_cnt of
	 * the previous VSI list.
	 */
	v_list_itr->vsi_list_info->ref_cnt--;

	/* now update to newly created list */
	v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
	v_list_itr->vsi_list_info =
		ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
	v_list_itr->vsi_count++;
	ice_release_lock(rule_lock);
3436 * ice_add_vlan_rule - Add VLAN based filter rule
3437 * @hw: pointer to the hardware structure
3438 * @v_list: list of VLAN entries and forwarding information
3439 * @sw: pointer to switch info struct for which function add rule
/* Walk v_list and program each VLAN entry via ice_add_vlan_internal().
 * Each entry's per-item result is stored in its ->status field; iteration
 * stops and returns on the first failure. Entries must be of lookup type
 * ICE_SW_LKUP_VLAN and are flagged as Tx-direction filters.
 */
static enum ice_status
ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
		  struct ice_switch_info *sw)
	struct ice_fltr_list_entry *v_list_itr;
	struct ice_sw_recipe *recp_list;

	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
		if (v_list_itr->status)
			return v_list_itr->status;
3463 * ice_add_vlan - Add a VLAN based filter rule
3464 * @hw: pointer to the hardware structure
3465 * @v_list: list of VLAN and forwarding information
3467 * Function add VLAN rule for logical port from HW struct
/* Public wrapper: add VLAN rules against hw->switch_info.
 * NOTE(review): the parameter-validation condition guarding the
 * ICE_ERR_PARAM return is not visible in this extraction.
 */
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
		return ICE_ERR_PARAM;
	return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3479 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3480 * @hw: pointer to the hardware structure
3481 * @mv_list: list of MAC and VLAN filters
3483 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3484 * pruning bits enabled, then it is the responsibility of the caller to make
3485 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3486 * VLAN won't be received on that VSI otherwise.
/* Add each MAC+VLAN pair in mv_list as a Tx-direction switch rule on the
 * ICE_SW_LKUP_MAC_VLAN recipe; per-entry result is stored in ->status and
 * the first failure aborts the walk.
 */
ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
	struct ice_fltr_list_entry *mv_list_itr;
	struct ice_sw_recipe *recp_list;

	if (!mv_list || !hw)
		return ICE_ERR_PARAM;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type =
			mv_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC_VLAN)
			return ICE_ERR_PARAM;
		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
		mv_list_itr->status =
			ice_add_rule_internal(hw, recp_list,
					      hw->port_info->lport,
		if (mv_list_itr->status)
			return mv_list_itr->status;
3517 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3518 * @hw: pointer to the hardware structure
3519 * @em_list: list of ether type MAC filter, MAC is optional
3520 * @sw: pointer to switch info struct for which function add rule
3521 * @lport: logic port number on which function add rule
3523 * This function requires the caller to populate the entries in
3524 * the filter list with the necessary fields (including flags to
3525 * indicate Tx or Rx rules).
/* Add each ethertype / ethertype+MAC filter in em_list; the recipe is chosen
 * from the entry's own lookup type. Caller pre-populates flags (Tx vs Rx).
 * First failing entry aborts the walk; per-entry result is in ->status.
 */
static enum ice_status
ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
		     struct ice_switch_info *sw, u8 lport)
	struct ice_fltr_list_entry *em_list_itr;

	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
		struct ice_sw_recipe *recp_list;
		enum ice_sw_lkup_type l_type;

		l_type = em_list_itr->fltr_info.lkup_type;
		recp_list = &sw->recp_list[l_type];

		/* Only ethertype and ethertype+MAC lookups are valid here */
		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_add_rule_internal(hw, recp_list,
		if (em_list_itr->status)
			return em_list_itr->status;
 * ice_add_eth_mac - Add an ethertype-based filter rule
3557 * @hw: pointer to the hardware structure
3558 * @em_list: list of ethertype and forwarding information
3560 * Function add ethertype rule for logical port from HW struct
/* Public wrapper: add ethertype(+MAC) rules for the HW's own switch info
 * and logical port.
 */
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
				    hw->port_info->lport);
3572 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3573 * @hw: pointer to the hardware structure
3574 * @em_list: list of ethertype or ethertype MAC entries
3575 * @sw: pointer to switch info struct for which function add rule
/* Remove each ethertype / ethertype+MAC filter in em_list from its recipe.
 * SAFE iteration is used because removal may unlink entries; the first
 * failing entry aborts the walk (per-entry result in ->status).
 */
static enum ice_status
ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
			struct ice_switch_info *sw)
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
		struct ice_sw_recipe *recp_list;
		enum ice_sw_lkup_type l_type;

		l_type = em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		recp_list = &sw->recp_list[l_type];
		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (em_list_itr->status)
			return em_list_itr->status;
 * ice_remove_eth_mac - remove an ethertype-based filter rule
3605 * @hw: pointer to the hardware structure
3606 * @em_list: list of ethertype and forwarding information
/* Public wrapper: remove ethertype(+MAC) rules from hw->switch_info. */
ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3619 * ice_rem_sw_rule_info
3620 * @hw: pointer to the hardware structure
3621 * @rule_head: pointer to the switch list structure that we want to delete
/* Unlink and free every ice_fltr_mgmt_list_entry on rule_head.
 * Software bookkeeping only — no hardware rules are touched here.
 */
ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	if (!LIST_EMPTY(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
					 ice_fltr_mgmt_list_entry, list_entry) {
			LIST_DEL(&entry->list_entry);
			ice_free(hw, entry);
3639 * ice_rem_adv_rule_info
3640 * @hw: pointer to the hardware structure
3641 * @rule_head: pointer to the switch list structure that we want to delete
/* Unlink and free every advanced-rule entry on rule_head, including each
 * entry's separately allocated ->lkups array. Software bookkeeping only.
 */
ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (LIST_EMPTY(rule_head))

	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
				 ice_adv_fltr_mgmt_list_entry, list_entry) {
		LIST_DEL(&lst_itr->list_entry);
		/* free the lookup array before the entry that owns it */
		ice_free(hw, lst_itr->lkups);
		ice_free(hw, lst_itr);
3661 * ice_rem_all_sw_rules_info
3662 * @hw: pointer to the hardware structure
/* Free the software rule bookkeeping of every recipe: basic recipes via
 * ice_rem_sw_rule_info(), advanced recipes via ice_rem_adv_rule_info().
 */
void ice_rem_all_sw_rules_info(struct ice_hw *hw)
	struct ice_switch_info *sw = hw->switch_info;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct LIST_HEAD_TYPE *rule_head;

		rule_head = &sw->recp_list[i].filt_rules;
		if (!sw->recp_list[i].adv_rule)
			ice_rem_sw_rule_info(hw, rule_head);
		ice_rem_adv_rule_info(hw, rule_head);
3681 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3682 * @pi: pointer to the port_info structure
3683 * @vsi_handle: VSI handle to set as default
3684 * @set: true to add the above mentioned switch rule, false to remove it
3685 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3687 * add filter rule to set/unset given VSI as default VSI for the switch
3688 * (represented by swid)
/* Set or clear the given VSI as the default VSI of the switch for the
 * requested direction, by issuing a single add/remove switch-rule AQ
 * command and then caching the resulting rule ID / VSI number in the
 * port_info structure.
 *
 * NOTE(review): local declarations (hw_vsi_id, s_rule_size) and some
 * branch lines are not visible in this extraction.
 */
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* adding a rule carries the dummy Ethernet header; removal does not */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rule is sourced from the logical port */
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		f_info.fltr_rule_id =
			pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx default rule is sourced from the VSI itself */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		f_info.fltr_rule_id =
			pi->dflt_tx_vsi_rule_id;
	opcode = ice_aqc_opc_add_sw_rules;
	opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
	/* cache the firmware-assigned rule index for later removal */
	u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	if (f_info.flag & ICE_FLTR_TX) {
		pi->dflt_tx_vsi_num = hw_vsi_id;
		pi->dflt_tx_vsi_rule_id = index;
	} else if (f_info.flag & ICE_FLTR_RX) {
		pi->dflt_rx_vsi_num = hw_vsi_id;
		pi->dflt_rx_vsi_rule_id = index;
	/* clearing: invalidate the cached default-VSI state */
	if (f_info.flag & ICE_FLTR_TX) {
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
	} else if (f_info.flag & ICE_FLTR_RX) {
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
	ice_free(hw, s_rule);
3769 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3770 * @list_head: head of rule list
3771 * @f_info: rule information
3773 * Helper function to search for a unicast rule entry - this is to be used
3774 * to remove unicast MAC filter that is not shared with other VSIs on the
3777 * Returns pointer to entry storing the rule if found
/* Linear search of list_head for an entry whose lookup data, hardware VSI
 * ID, and direction flag all match f_info. Used to check whether a unicast
 * MAC rule belongs to the requesting VSI before removal.
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
			  struct ice_fltr_info *f_info)
	struct ice_fltr_mgmt_list_entry *list_itr;

	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
3798 * ice_remove_mac_rule - remove a MAC based filter rule
3799 * @hw: pointer to the hardware structure
3800 * @m_list: list of MAC addresses and forwarding information
3801 * @recp_list: list from which function remove MAC address
3803 * This function removes either a MAC filter rule or a specific VSI from a
3804 * VSI list for a multicast MAC address.
3806 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3807 * ice_add_mac. Caller should be aware that this call will only work if all
3808 * the entries passed into m_list were added previously. It will not attempt to
3809 * do a partial remove of entries that were found.
/* Remove each MAC filter in m_list from recp_list. For unicast addresses
 * (when unicast is not shared across VSIs) the rule is first verified to
 * belong to the requesting VSI under the recipe lock, so one VSI cannot
 * remove another VSI's unicast address.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared.
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			ice_release_lock(rule_lock);
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (list_itr->status)
			return list_itr->status;
3859 * ice_remove_mac - remove a MAC address based filter rule
3860 * @hw: pointer to the hardware structure
3861 * @m_list: list of MAC addresses and forwarding information
/* Public wrapper: remove MAC rules from the ICE_SW_LKUP_MAC recipe of
 * hw->switch_info.
 */
ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
	struct ice_sw_recipe *recp_list;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
	return ice_remove_mac_rule(hw, m_list, recp_list);
3874 * ice_remove_vlan_rule - Remove VLAN based filter rule
3875 * @hw: pointer to the hardware structure
3876 * @v_list: list of VLAN entries and forwarding information
3877 * @recp_list: list from which function remove VLAN
/* Remove each VLAN filter in v_list from recp_list; aborts on first
 * failure (per-entry result in ->status).
 */
static enum ice_status
ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
		     struct ice_sw_recipe *recp_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
		if (v_list_itr->status)
			return v_list_itr->status;
3900 * ice_remove_vlan - remove a VLAN address based filter rule
3901 * @hw: pointer to the hardware structure
3902 * @v_list: list of VLAN and forwarding information
/* Public wrapper: remove VLAN rules from the ICE_SW_LKUP_VLAN recipe.
 * NOTE(review): the parameter-validation condition guarding the
 * ICE_ERR_PARAM return is not visible in this extraction.
 */
ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
	struct ice_sw_recipe *recp_list;

		return ICE_ERR_PARAM;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
	return ice_remove_vlan_rule(hw, v_list, recp_list);
3918 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3919 * @hw: pointer to the hardware structure
3920 * @v_list: list of MAC VLAN entries and forwarding information
/* Remove each MAC+VLAN filter in v_list from the ICE_SW_LKUP_MAC_VLAN
 * recipe; aborts on first failure (per-entry result in ->status).
 */
ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;
	struct ice_sw_recipe *recp_list;

		return ICE_ERR_PARAM;

	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_list,
		if (v_list_itr->status)
			return v_list_itr->status;
3948 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3949 * @fm_entry: filter entry to inspect
3950 * @vsi_handle: VSI handle to compare with filter info
/* True when fm_entry applies to vsi_handle: either the rule forwards
 * directly to that VSI, or it forwards to a VSI list whose bitmap has the
 * handle's bit set.
 */
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3963 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3964 * @hw: pointer to the hardware structure
3965 * @vsi_handle: VSI handle to remove filters from
3966 * @vsi_list_head: pointer to the list to add entry to
3967 * @fi: pointer to fltr_info of filter entry to copy & add
3969 * Helper function, used when creating a list of filters to remove from
3970 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3971 * original filter entry, with the exception of fltr_info.fltr_act and
3972 * fltr_info.fwd_id fields. These are set such that later logic can
3973 * extract which VSI to remove the fltr from, and pass on that information.
/* Allocate a COPY of *fi and append it to vsi_list_head. The copy's
 * fltr_act/vsi_handle/fwd_id are overwritten to point at vsi_handle so
 * later remove logic knows which VSI to strip the filter from; the
 * original entry is left untouched. Caller owns (and must free) the copy.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct LIST_HEAD_TYPE *vsi_list_head,
			       struct ice_fltr_info *fi)
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_ADD(&tmp->list_entry, vsi_list_head);
4006 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4007 * @hw: pointer to the hardware structure
4008 * @vsi_handle: VSI handle to remove filters from
4009 * @lkup_list_head: pointer to the list that has certain lookup type filters
4010 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4012 * Locates all filters in lkup_list_head that are used by the given VSI,
4013 * and adds COPIES of those entries to vsi_list_head (intended to be used
4014 * to remove the listed filters).
4015 * Note that this means all entries in vsi_list_head must be explicitly
4016 * deallocated by the caller when done with list.
/* Scan lkup_list_head and, for every filter used by vsi_handle (per
 * ice_vsi_uses_fltr()), append a copy to vsi_list_head via
 * ice_add_entry_to_vsi_fltr_list(). Caller must free the copies.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct LIST_HEAD_TYPE *lkup_list_head,
			 struct LIST_HEAD_TYPE *vsi_list_head)
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = ICE_SUCCESS;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fi;

		fi = &fm_entry->fltr_info;
		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4047 * ice_determine_promisc_mask
4048 * @fi: filter info to parse
4050 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to the given filter info.
/* Map a filter's DA class (broadcast / multicast / unicast) and direction
 * flag onto the corresponding ICE_PROMISC_* bit(s).
 * NOTE(review): the assignment that sets is_tx_fltr for Tx filters and the
 * condition guarding the VLAN-promisc bits (presumably on vid) are not
 * visible in this extraction.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)

	if (IS_BROADCAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (IS_UNICAST_ETHER_ADDR(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	promisc_mask |= is_tx_fltr ?
		ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
4080 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4081 * @hw: pointer to the hardware structure
4082 * @vsi_handle: VSI handle to retrieve info from
4083 * @promisc_mask: pointer to mask to be filled in
4084 * @vid: VLAN ID of promisc VLAN VSI
/* OR together, under the recipe lock, the ICE_PROMISC_* bits of every
 * ICE_SW_LKUP_PROMISC rule that applies to vsi_handle, into *promisc_mask.
 */
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
4120 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4121 * @hw: pointer to the hardware structure
4122 * @vsi_handle: VSI handle to retrieve info from
4123 * @promisc_mask: pointer to mask to be filled in
4124 * @vid: VLAN ID of promisc VLAN VSI
/* Same as ice_get_vsi_promisc() but over the ICE_SW_LKUP_PROMISC_VLAN
 * recipe: OR the matching rules' ICE_PROMISC_* bits into *promisc_mask.
 */
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	ice_release_lock(rule_lock);
4160 * ice_remove_promisc - Remove promisc based filter rules
4161 * @hw: pointer to the hardware structure
4162 * @recp_id: recipe ID for which the rule needs to removed
4163 * @v_list: list of promisc entries
/* Remove each promiscuous-mode rule in v_list from the recipe identified
 * by recp_id; aborts on first failure (per-entry result in ->status).
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct LIST_HEAD_TYPE *v_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;
	struct ice_sw_recipe *recp_list;

	recp_list = &hw->switch_info->recp_list[recp_id];
	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_list, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
4184 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4185 * @hw: pointer to the hardware structure
4186 * @vsi_handle: VSI handle to clear mode
4187 * @promisc_mask: mask of promiscuous config bits to clear
4188 * @vid: VLAN ID to clear VLAN promiscuous
/* Clear the requested promiscuous bits on vsi_handle: collect (under the
 * recipe lock) copies of every matching rule into a temporary list, then
 * remove them via ice_remove_promisc(), and finally free the copies.
 */
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
			ice_release_lock(rule_lock);
			goto free_fltr_list;
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
4259 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4260 * @hw: pointer to the hardware structure
4261 * @vsi_handle: VSI handle to configure
4262 * @promisc_mask: mask of promiscuous config bits
4263 * @vid: VLAN ID to set VLAN promiscuous
/* Enable the requested promiscuous bits on vsi_handle. One switch rule is
 * programmed per direction/packet-type combination: the loop peels one
 * ICE_PROMISC_* bit off promisc_mask per iteration and adds a matching
 * rule via ice_add_rule_internal().
 *
 * NOTE(review): several local declarations (hw_vsi_id, recipe_id, pkt_type,
 * is_tx_fltr, mac_addr) and some branch/continuation lines are not visible
 * in this extraction.
 */
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN promisc bits select the PROMISC_VLAN recipe and carry the VID */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */

		/* Need to reset this to zero for all iterations */
		new_fltr.flag |= ICE_FLTR_TX;
		new_fltr.src = hw_vsi_id;
		new_fltr.flag |= ICE_FLTR_RX;
		new_fltr.src = hw->port_info->lport;

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &hw->switch_info->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list,
					       hw->port_info->lport,
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
4378 * ice_set_vlan_vsi_promisc
4379 * @hw: pointer to the hardware structure
4380 * @vsi_handle: VSI handle to configure
4381 * @promisc_mask: mask of promiscuous config bits
4382 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4384 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Apply (or clear, when rm_vlan_promisc) the given promiscuous mask to
 * every VLAN currently programmed on vsi_handle: the VSI's VLAN filters
 * are snapshotted under the VLAN recipe lock, then ice_set_vsi_promisc() /
 * ice_clear_vsi_promisc() is called once per VLAN ID. The snapshot copies
 * are freed before returning.
 */
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
	ice_release_lock(vlan_lock);
		goto free_fltr_list;

	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);

	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
4431 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4432 * @hw: pointer to the hardware structure
4433 * @vsi_handle: VSI handle to remove filters from
4434 * @recp_list: recipe list from which function remove fltr
4435 * @lkup: switch rule filter lookup type
/* Remove every filter of one lookup type used by vsi_handle: snapshot the
 * matching rules (under the recipe lock) into remove_list_head, dispatch
 * to the type-specific remove routine, then free the snapshot copies.
 * ICE_SW_LKUP_DFLT removal is not implemented and only logs a debug msg.
 */
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	ice_release_lock(rule_lock);

	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");

	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
4494 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4495 * @hw: pointer to the hardware structure
4496 * @vsi_handle: VSI handle to remove filters from
4497 * @sw: pointer to switch info struct
4500 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4501 struct ice_switch_info *sw)
4503 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Walk every supported lookup type and remove the VSI's filters for
 * each one; ICE_SW_LKUP_DFLT removal is currently a no-op (see
 * ice_remove_vsi_lkup_fltr).
 */
4505 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4506 sw->recp_list, ICE_SW_LKUP_MAC);
4507 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4508 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4509 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4510 sw->recp_list, ICE_SW_LKUP_PROMISC);
4511 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4512 sw->recp_list, ICE_SW_LKUP_VLAN);
4513 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4514 sw->recp_list, ICE_SW_LKUP_DFLT);
4515 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4516 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4517 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4518 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4519 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4520 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4524 * ice_remove_vsi_fltr - Remove all filters for a VSI
4525 * @hw: pointer to the hardware structure
4526 * @vsi_handle: VSI handle to remove filters from
4528 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
/* Convenience wrapper over ice_remove_vsi_fltr_rule using the primary
 * (PF) switch info attached to the hw struct.
 */
4530 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4534 * ice_alloc_res_cntr - allocating resource counter
4535 * @hw: pointer to the hardware structure
4536 * @type: type of resource
4537 * @alloc_shared: if set it is shared else dedicated
4538 * @num_items: number of entries requested for FD resource type
4539 * @counter_id: counter index returned by AQ call
4542 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4545 struct ice_aqc_alloc_free_res_elem *buf;
4546 enum ice_status status;
4549 /* Allocate resource */
4550 buf_len = sizeof(*buf);
4551 buf = (struct ice_aqc_alloc_free_res_elem *)
4552 ice_malloc(hw, buf_len);
4554 return ICE_ERR_NO_MEMORY;
/* res_type field carries both the resource type (shifted into the
 * ICE_AQC_RES_TYPE_M field) and the shared/dedicated flag bit.
 */
4556 buf->num_elems = CPU_TO_LE16(num_items);
4557 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4558 ICE_AQC_RES_TYPE_M) | alloc_shared);
4560 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4561 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in sw_resp */
4565 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4573 * ice_free_res_cntr - free resource counter
4574 * @hw: pointer to the hardware structure
4575 * @type: type of resource
4576 * @alloc_shared: if set it is shared else dedicated
4577 * @num_items: number of entries to be freed for FD resource type
4578 * @counter_id: counter ID resource which needs to be freed
4581 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4584 struct ice_aqc_alloc_free_res_elem *buf;
4585 enum ice_status status;
4589 buf_len = sizeof(*buf);
4590 buf = (struct ice_aqc_alloc_free_res_elem *)
4591 ice_malloc(hw, buf_len);
4593 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr: same res_type encoding, but the
 * counter index to release is passed in via sw_resp and the free
 * opcode is used.
 */
4595 buf->num_elems = CPU_TO_LE16(num_items);
4596 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4597 ICE_AQC_RES_TYPE_M) | alloc_shared);
4598 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4600 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4601 ice_aqc_opc_free_res, NULL);
4603 ice_debug(hw, ICE_DBG_SW,
4604 "counter resource could not be freed\n");
4611 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4612 * @hw: pointer to the hardware structure
4613 * @counter_id: returns counter index
4615 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Thin wrapper: allocate a single dedicated VLAN counter resource */
4617 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4618 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4623 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4624 * @hw: pointer to the hardware structure
4625 * @counter_id: counter index to be freed
4627 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Thin wrapper: release a single dedicated VLAN counter resource */
4629 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4630 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4635 * ice_alloc_res_lg_act - add large action resource
4636 * @hw: pointer to the hardware structure
4637 * @l_id: large action ID to fill it in
4638 * @num_acts: number of actions to hold with a large action entry
4640 static enum ice_status
4641 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4643 struct ice_aqc_alloc_free_res_elem *sw_buf;
4644 enum ice_status status;
/* Reject zero or more than ICE_MAX_LG_ACT actions up front */
4647 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4648 return ICE_ERR_PARAM;
4650 /* Allocate resource for large action */
4651 buf_len = sizeof(*sw_buf);
4652 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4653 ice_malloc(hw, buf_len);
4655 return ICE_ERR_NO_MEMORY;
4657 sw_buf->num_elems = CPU_TO_LE16(1);
4659 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4660 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4661 * If num_acts is greater than 2, then use
4662 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4663 * The num_acts cannot exceed 4. This was ensured at the
4664 * beginning of the function.
4667 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4668 else if (num_acts == 2)
4669 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4671 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4673 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4674 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated large-action index in sw_resp */
4676 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4678 ice_free(hw, sw_buf);
4683 * ice_add_mac_with_sw_marker - add filter with sw marker
4684 * @hw: pointer to the hardware structure
4685 * @f_info: filter info structure containing the MAC filter information
4686 * @sw_marker: sw marker to tag the Rx descriptor with
4689 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4692 struct ice_fltr_mgmt_list_entry *m_entry;
4693 struct ice_fltr_list_entry fl_info;
4694 struct ice_sw_recipe *recp_list;
4695 struct LIST_HEAD_TYPE l_head;
4696 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4697 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker and VSI handle
 * may carry an sw marker large action.
 */
4701 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4702 return ICE_ERR_PARAM;
4704 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4705 return ICE_ERR_PARAM;
4707 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4708 return ICE_ERR_PARAM;
4710 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4711 return ICE_ERR_PARAM;
4712 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4714 /* Add filter if it doesn't exist so then the adding of large
4715 * action always results in update
4718 INIT_LIST_HEAD(&l_head);
4719 fl_info.fltr_info = *f_info;
4720 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the rule pre-existed so cleanup at the end only
 * removes a rule this function itself created.
 */
4722 entry_exists = false;
4723 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4724 hw->port_info->lport);
4725 if (ret == ICE_ERR_ALREADY_EXISTS)
4726 entry_exists = true;
4730 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4731 rule_lock = &recp_list->filt_rule_lock;
4732 ice_acquire_lock(rule_lock);
4733 /* Get the book keeping entry for the filter */
4734 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4738 /* If counter action was enabled for this rule then don't enable
4739 * sw marker large action
4741 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4742 ret = ICE_ERR_PARAM;
4746 /* if same marker was added before */
4747 if (m_entry->sw_marker_id == sw_marker) {
4748 ret = ICE_ERR_ALREADY_EXISTS;
4752 /* Allocate a hardware table entry to hold large act. Three actions
4753 * for marker based large action
4755 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4759 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4762 /* Update the switch rule to add the marker action */
4763 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4765 ice_release_lock(rule_lock);
4770 ice_release_lock(rule_lock);
4771 /* only remove entry if it did not exist previously */
4773 ret = ice_remove_mac(hw, &l_head);
4779 * ice_add_mac_with_counter - add filter with counter enabled
4780 * @hw: pointer to the hardware structure
4781 * @f_info: pointer to filter info structure containing the MAC filter
4785 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4787 struct ice_fltr_mgmt_list_entry *m_entry;
4788 struct ice_fltr_list_entry fl_info;
4789 struct ice_sw_recipe *recp_list;
4790 struct LIST_HEAD_TYPE l_head;
4791 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4792 enum ice_status ret;
/* Counter large actions apply only to forward-to-VSI MAC filters */
4797 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4798 return ICE_ERR_PARAM;
4800 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4801 return ICE_ERR_PARAM;
4803 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4804 return ICE_ERR_PARAM;
4805 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4806 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
/* Track whether the rule pre-existed so cleanup only removes a rule
 * this function itself created.
 */
4808 entry_exist = false;
4810 rule_lock = &recp_list->filt_rule_lock;
4812 /* Add filter if it doesn't exist so then the adding of large
4813 * action always results in update
4815 INIT_LIST_HEAD(&l_head);
4817 fl_info.fltr_info = *f_info;
4818 LIST_ADD(&fl_info.list_entry, &l_head);
4820 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4821 hw->port_info->lport);
4822 if (ret == ICE_ERR_ALREADY_EXISTS)
4827 ice_acquire_lock(rule_lock);
4828 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4830 ret = ICE_ERR_BAD_PTR;
4834 /* Don't enable counter for a filter for which sw marker was enabled */
4835 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4836 ret = ICE_ERR_PARAM;
4840 /* If a counter was already enabled then don't need to add again */
4841 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4842 ret = ICE_ERR_ALREADY_EXISTS;
4846 /* Allocate a hardware table entry to VLAN counter */
4847 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4851 /* Allocate a hardware table entry to hold large act. Two actions for
4852 * counter based large action
4854 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4858 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4861 /* Update the switch rule to add the counter action */
4862 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4864 ice_release_lock(rule_lock);
4869 ice_release_lock(rule_lock);
4870 /* only remove entry if it did not exist previously */
4872 ret = ice_remove_mac(hw, &l_head);
4877 /* This is mapping table entry that maps every word within a given protocol
4878 * structure to the real byte offset as per the specification of that
4880 * for example dst address is 3 words in ethertype header and corresponding
4881 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4882 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4883 * matching entry describing its field. This needs to be updated if new
4884 * structure is added to that union.
4886 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4887 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4888 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4889 { ICE_ETYPE_OL, { 0 } },
4890 { ICE_VLAN_OFOS, { 0, 2 } },
4891 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4892 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4893 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4894 26, 28, 30, 32, 34, 36, 38 } },
4895 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4896 26, 28, 30, 32, 34, 36, 38 } },
4897 { ICE_TCP_IL, { 0, 2 } },
4898 { ICE_UDP_OF, { 0, 2 } },
4899 { ICE_UDP_ILOS, { 0, 2 } },
4900 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers start extracting at byte 8, presumably past the
 * preceding UDP header words -- NOTE(review): confirm against the
 * ice_prot_hdr union layout.
 */
4901 { ICE_VXLAN, { 8, 10, 12, 14 } },
4902 { ICE_GENEVE, { 8, 10, 12, 14 } },
4903 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4904 { ICE_NVGRE, { 0, 2, 4, 6 } },
4905 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4906 { ICE_PPPOE, { 0, 2, 4, 6 } },
4909 /* The following table describes preferred grouping of recipes.
4910 * If a recipe that needs to be programmed is a superset or matches one of the
4911 * following combinations, then the recipe needs to be chained as per the
4915 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4916 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4917 { ICE_MAC_IL, ICE_MAC_IL_HW },
4918 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4919 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4920 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4921 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4922 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4923 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4924 { ICE_TCP_IL, ICE_TCP_IL_HW },
4925 { ICE_UDP_OF, ICE_UDP_OF_HW },
4926 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4927 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
/* UDP-encapsulated tunnels share the outer-UDP hardware protocol ID */
4928 { ICE_VXLAN, ICE_UDP_OF_HW },
4929 { ICE_GENEVE, ICE_UDP_OF_HW },
4930 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4931 { ICE_NVGRE, ICE_GRE_OF_HW },
4932 { ICE_GTP, ICE_UDP_OF_HW },
4933 { ICE_PPPOE, ICE_PPPOE_HW },
4937 * ice_find_recp - find a recipe
4938 * @hw: pointer to the hardware structure
4939 * @lkup_exts: extension sequence to match
4941 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4943 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4945 bool refresh_required = true;
4946 struct ice_sw_recipe *recp;
4949 /* Walk through existing recipes to find a match */
4950 recp = hw->switch_info->recp_list;
4951 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4952 /* If recipe was not created for this ID, in SW bookkeeping,
4953 * check if FW has an entry for this recipe. If the FW has an
4954 * entry update it in our SW bookkeeping and continue with the
4957 if (!recp[i].recp_created)
4958 if (ice_get_recp_frm_fw(hw,
4959 hw->switch_info->recp_list, i,
4963 /* Skip inverse action recipes */
4964 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4965 ICE_AQ_RECIPE_ACT_INV_ACT)
4968 /* if number of words we are looking for match */
4969 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4970 struct ice_fv_word *a = lkup_exts->fv_words;
4971 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: every requested (prot_id, off)
 * word must appear somewhere in the recipe's word list.
 */
4975 for (p = 0; p < lkup_exts->n_val_words; p++) {
4976 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4978 if (a[p].off == b[q].off &&
4979 a[p].prot_id == b[q].prot_id)
4980 /* Found the "p"th word in the
4985 /* After walking through all the words in the
4986 * "i"th recipe if "p"th word was not found then
4987 * this recipe is not what we are looking for.
4988 * So break out from this loop and try the next
4991 if (q >= recp[i].lkup_exts.n_val_words) {
4996 /* If for "i"th recipe the found was never set to false
4997 * then it means we found our match
5000 return i; /* Return the recipe ID */
/* No existing recipe matched the requested extraction sequence */
5003 return ICE_MAX_NUM_RECIPES;
5007 * ice_prot_type_to_id - get protocol ID from protocol type
5008 * @type: protocol type
5009 * @id: pointer to variable that will receive the ID
5011 * Returns true if found, false otherwise
5013 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
/* Linear scan of the static type->HW-protocol-ID mapping table */
5017 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5018 if (ice_prot_id_tbl[i].type == type) {
5019 *id = ice_prot_id_tbl[i].protocol_id;
5026 * ice_fill_valid_words - fill valid lookup words
5027 * @rule: advanced rule with lookup information
5028 * @lkup_exts: byte offset extractions of the words that are valid
5030 * calculate valid words in a lookup rule using mask value
5033 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5034 struct ice_prot_lkup_ext *lkup_exts)
5036 u8 j, word, prot_id, ret_val;
5038 if (!ice_prot_type_to_id(rule->type, &prot_id))
/* Append after any words already recorded by previous rules */
5041 word = lkup_exts->n_val_words;
/* A 16-bit word participates in matching only if its mask is non-zero */
5043 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5044 if (((u16 *)&rule->m_u)[j] &&
5045 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5046 /* No more space to accommodate */
5047 if (word >= ICE_MAX_CHAIN_WORDS)
5049 lkup_exts->fv_words[word].off =
5050 ice_prot_ext[rule->type].offs[j];
5051 lkup_exts->fv_words[word].prot_id =
5052 ice_prot_id_tbl[rule->type].protocol_id;
5053 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return the number of words added by this rule */
5057 ret_val = word - lkup_exts->n_val_words;
5058 lkup_exts->n_val_words = word;
5064 * ice_create_first_fit_recp_def - Create a recipe grouping
5065 * @hw: pointer to the hardware structure
5066 * @lkup_exts: an array of protocol header extractions
5067 * @rg_list: pointer to a list that stores new recipe groups
5068 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5070 * Using first fit algorithm, take all the words that are still not done
5071 * and start grouping them in 4-word groups. Each group makes up one
5074 static enum ice_status
5075 ice_create_first_fit_recp_def(struct ice_hw *hw,
5076 struct ice_prot_lkup_ext *lkup_exts,
5077 struct LIST_HEAD_TYPE *rg_list,
5080 struct ice_pref_recipe_group *grp = NULL;
5085 /* Walk through every word in the rule to check if it is not done. If so
5086 * then this word needs to be part of a new recipe.
5088 for (j = 0; j < lkup_exts->n_val_words; j++)
5089 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one
 * is full (ICE_NUM_WORDS_RECIPE pairs per group).
 */
5091 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5092 struct ice_recp_grp_entry *entry;
5094 entry = (struct ice_recp_grp_entry *)
5095 ice_malloc(hw, sizeof(*entry));
5097 return ICE_ERR_NO_MEMORY;
5098 LIST_ADD(&entry->l_entry, rg_list);
5099 grp = &entry->r_group;
/* Copy the word's protocol/offset/mask into the current group */
5103 grp->pairs[grp->n_val_pairs].prot_id =
5104 lkup_exts->fv_words[j].prot_id;
5105 grp->pairs[grp->n_val_pairs].off =
5106 lkup_exts->fv_words[j].off;
5107 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5115 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5116 * @hw: pointer to the hardware structure
5117 * @fv_list: field vector with the extraction sequence information
5118 * @rg_list: recipe groupings with protocol-offset pairs
5120 * Helper function to fill in the field vector indices for protocol-offset
5121 * pairs. These indexes are then ultimately programmed into a recipe.
5123 static enum ice_status
5124 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5125 struct LIST_HEAD_TYPE *rg_list)
5127 struct ice_sw_fv_list_entry *fv;
5128 struct ice_recp_grp_entry *rg;
5129 struct ice_fv_word *fv_ext;
5131 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted here --
 * NOTE(review): presumably all FVs in the list share the same layout
 * for these pairs; confirm with ice_get_sw_fv_list callers.
 */
5134 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5135 fv_ext = fv->fv_ptr->ew;
5137 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5140 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5141 struct ice_fv_word *pr;
5146 pr = &rg->r_group.pairs[i];
5147 mask = rg->r_group.mask[i];
/* Search the extraction words for a matching prot_id/offset */
5149 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5150 if (fv_ext[j].prot_id == pr->prot_id &&
5151 fv_ext[j].off == pr->off) {
5154 /* Store index of field vector */
5156 /* Mask is given by caller as big
5157 * endian, but sent to FW as little
5160 rg->fv_mask[i] = mask << 8 | mask >> 8;
5164 /* Protocol/offset could not be found, caller gave an
5168 return ICE_ERR_PARAM;
5176 * ice_find_free_recp_res_idx - find free result indexes for recipe
5177 * @hw: pointer to hardware structure
5178 * @profiles: bitmap of profiles that will be associated with the new recipe
5179 * @free_idx: pointer to variable to receive the free index bitmap
5181 * The algorithm used here is:
5182 * 1. When creating a new recipe, create a set P which contains all
5183 * Profiles that will be associated with our new recipe
5185 * 2. For each Profile p in set P:
5186 * a. Add all recipes associated with Profile p into set R
5187 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5188 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5189 * i. Or just assume they all have the same possible indexes:
5191 * i.e., PossibleIndexes = 0x0000F00000000000
5193 * 3. For each Recipe r in set R:
5194 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5195 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5197 * FreeIndexes will contain the bits indicating the indexes free for use,
5198 * then the code needs to update the recipe[r].used_result_idx_bits to
5199 * indicate which indexes were selected for use by this recipe.
5202 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5203 ice_bitmap_t *free_idx)
5205 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5206 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5207 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5211 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5212 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5213 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5214 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start with every index considered possible, then narrow down */
5216 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5217 ice_set_bit(count, possible_idx);
5219 /* For each profile we are going to associate the recipe with, add the
5220 * recipes that are associated with that profile. This will give us
5221 * the set of recipes that our recipe may collide with. Also, determine
5222 * what possible result indexes are usable given this set of profiles.
5225 while (ICE_MAX_NUM_PROFILES >
5226 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5227 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5228 ICE_MAX_NUM_RECIPES);
5229 ice_and_bitmap(possible_idx, possible_idx,
5230 hw->switch_info->prof_res_bm[bit],
5235 /* For each recipe that our new recipe may collide with, determine
5236 * which indexes have been used.
5238 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5239 if (ice_is_bit_set(recipes, bit)) {
5240 ice_or_bitmap(used_idx, used_idx,
5241 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
5245 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5247 /* return number of free indexes */
5250 while (ICE_MAX_FV_WORDS >
5251 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5260 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5261 * @hw: pointer to hardware structure
5262 * @rm: recipe management list entry
5263 * @match_tun: if field vector index for tunnel needs to be programmed
5264 * @profiles: bitmap of profiles that will be assocated.
5266 static enum ice_status
5267 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5268 bool match_tun, ice_bitmap_t *profiles)
5270 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5271 struct ice_aqc_recipe_data_elem *tmp;
5272 struct ice_aqc_recipe_data_elem *buf;
5273 struct ice_recp_grp_entry *entry;
5274 enum ice_status status;
5280 /* When more than one recipe are required, another recipe is needed to
5281 * chain them together. Matching a tunnel metadata ID takes up one of
5282 * the match fields in the chaining recipe reducing the number of
5283 * chained recipes by one.
5285 /* check number of free result indices */
5286 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5287 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5289 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5290 free_res_idx, rm->n_grp_count);
5292 if (rm->n_grp_count > 1) {
5293 if (rm->n_grp_count > free_res_idx)
5294 return ICE_ERR_MAX_LIMIT;
/* tmp holds the recipes read back from FW (used as a template);
 * buf accumulates the n_grp_count recipes to be programmed.
 */
5299 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5300 ICE_MAX_NUM_RECIPES,
5303 return ICE_ERR_NO_MEMORY;
5305 buf = (struct ice_aqc_recipe_data_elem *)
5306 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5308 status = ICE_ERR_NO_MEMORY;
5312 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5313 recipe_count = ICE_MAX_NUM_RECIPES;
5314 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5316 if (status || recipe_count == 0)
5319 /* Allocate the recipe resources, and configure them according to the
5320 * match fields from protocol headers and extracted field vectors.
5322 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5323 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5326 status = ice_alloc_recipe(hw, &entry->rid);
5330 /* Clear the result index of the located recipe, as this will be
5331 * updated, if needed, later in the recipe creation process.
5333 tmp[0].content.result_indx = 0;
5335 buf[recps] = tmp[0];
5336 buf[recps].recipe_indx = (u8)entry->rid;
5337 /* if the recipe is a non-root recipe RID should be programmed
5338 * as 0 for the rules to be applied correctly.
5340 buf[recps].content.rid = 0;
5341 ice_memset(&buf[recps].content.lkup_indx, 0,
5342 sizeof(buf[recps].content.lkup_indx),
5345 /* All recipes use look-up index 0 to match switch ID. */
5346 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5347 buf[recps].content.mask[0] =
5348 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5349 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5352 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5353 buf[recps].content.lkup_indx[i] = 0x80;
5354 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore slots with this group's real FV indices */
5357 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5358 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5359 buf[recps].content.mask[i + 1] =
5360 CPU_TO_LE16(entry->fv_mask[i]);
5363 if (rm->n_grp_count > 1) {
5364 /* Checks to see if there really is a valid result index
5367 if (chain_idx >= ICE_MAX_FV_WORDS) {
5368 ice_debug(hw, ICE_DBG_SW,
5369 "No chain index available\n");
5370 status = ICE_ERR_MAX_LIMIT;
/* Publish this recipe's match result at chain_idx so the
 * root chaining recipe can consume it, then take the next
 * free result index for the following group.
 */
5374 entry->chain_idx = chain_idx;
5375 buf[recps].content.result_indx =
5376 ICE_AQ_RECIPE_RESULT_EN |
5377 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5378 ICE_AQ_RECIPE_RESULT_DATA_M);
5379 ice_clear_bit(chain_idx, result_idx_bm);
5380 chain_idx = ice_find_first_bit(result_idx_bm,
5384 /* fill recipe dependencies */
5385 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5386 ICE_MAX_NUM_RECIPES);
5387 ice_set_bit(buf[recps].recipe_indx,
5388 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5389 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is itself the root */
5393 if (rm->n_grp_count == 1) {
5394 rm->root_rid = buf[0].recipe_indx;
5395 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5396 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5397 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5398 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5399 sizeof(buf[0].recipe_bitmap),
5400 ICE_NONDMA_TO_NONDMA);
5402 status = ICE_ERR_BAD_PTR;
5405 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5406 * the recipe which is getting created if specified
5407 * by user. Usually any advanced switch filter, which results
5408 * into new extraction sequence, ended up creating a new recipe
5409 * of type ROOT and usually recipes are associated with profiles
5410 * Switch rule referreing newly created recipe, needs to have
5411 * either/or 'fwd' or 'join' priority, otherwise switch rule
5412 * evaluation will not happen correctly. In other words, if
5413 * switch rule to be evaluated on priority basis, then recipe
5414 * needs to have priority, otherwise it will be evaluated last.
5416 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5418 struct ice_recp_grp_entry *last_chain_entry;
5421 /* Allocate the last recipe that will chain the outcomes of the
5422 * other recipes together
5424 status = ice_alloc_recipe(hw, &rid);
5428 buf[recps].recipe_indx = (u8)rid;
5429 buf[recps].content.rid = (u8)rid;
5430 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5431 /* the new entry created should also be part of rg_list to
5432 * make sure we have complete recipe
5434 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5435 sizeof(*last_chain_entry));
5436 if (!last_chain_entry) {
5437 status = ICE_ERR_NO_MEMORY;
5440 last_chain_entry->rid = rid;
5441 ice_memset(&buf[recps].content.lkup_indx, 0,
5442 sizeof(buf[recps].content.lkup_indx),
5444 /* All recipes use look-up index 0 to match switch ID. */
5445 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5446 buf[recps].content.mask[0] =
5447 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5448 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5449 buf[recps].content.lkup_indx[i] =
5450 ICE_AQ_RECIPE_LKUP_IGNORE;
5451 buf[recps].content.mask[i] = 0;
5455 /* update r_bitmap with the recp that is used for chaining */
5456 ice_set_bit(rid, rm->r_bitmap);
5457 /* this is the recipe that chains all the other recipes so it
5458 * should not have a chaining ID to indicate the same
5460 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches each sub-recipe's published result
 * index with a full 0xFFFF mask.
 */
5461 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5463 last_chain_entry->fv_idx[i] = entry->chain_idx;
5464 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5465 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5466 ice_set_bit(entry->rid, rm->r_bitmap);
5468 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5469 if (sizeof(buf[recps].recipe_bitmap) >=
5470 sizeof(rm->r_bitmap)) {
5471 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5472 sizeof(buf[recps].recipe_bitmap),
5473 ICE_NONDMA_TO_NONDMA);
5475 status = ICE_ERR_BAD_PTR;
5478 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5480 /* To differentiate among different UDP tunnels, a meta data ID
5484 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5485 buf[recps].content.mask[i] =
5486 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5490 rm->root_rid = (u8)rid;
/* Program all assembled recipes into FW under the change lock */
5492 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5496 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5497 ice_release_change_lock(hw);
5501 /* Every recipe that just got created add it to the recipe
5504 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5505 struct ice_switch_info *sw = hw->switch_info;
5506 bool is_root, idx_found = false;
5507 struct ice_sw_recipe *recp;
5508 u16 idx, buf_idx = 0;
5510 /* find buffer index for copying some data */
5511 for (idx = 0; idx < rm->n_grp_count; idx++)
5512 if (buf[idx].recipe_indx == entry->rid) {
5518 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5522 recp = &sw->recp_list[entry->rid];
5523 is_root = (rm->root_rid == entry->rid);
5524 recp->is_root = is_root;
5526 recp->root_rid = entry->rid;
5527 recp->big_recp = (is_root && rm->n_grp_count > 1);
5529 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5530 entry->r_group.n_val_pairs *
5531 sizeof(struct ice_fv_word),
5532 ICE_NONDMA_TO_NONDMA);
5534 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5535 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5537 /* Copy non-result fv index values and masks to recipe. This
5538 * call will also update the result recipe bitmask.
5540 ice_collect_result_idx(&buf[buf_idx], recp);
5542 /* for non-root recipes, also copy to the root, this allows
5543 * easier matching of a complete chained recipe
5546 ice_collect_result_idx(&buf[buf_idx],
5547 &sw->recp_list[rm->root_rid]);
5549 recp->n_ext_words = entry->r_group.n_val_pairs;
5550 recp->chain_idx = entry->chain_idx;
5551 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5552 recp->n_grp_count = rm->n_grp_count;
5553 recp->tun_type = rm->tun_type;
5554 recp->recp_created = true;
5569 * ice_create_recipe_group - creates recipe group
5570 * @hw: pointer to hardware structure
5571 * @rm: recipe management list entry
5572 * @lkup_exts: lookup elements
5574 static enum ice_status
5575 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5576 struct ice_prot_lkup_ext *lkup_exts)
5578 enum ice_status status;
5581 rm->n_grp_count = 0;
5583 /* Create recipes for words that are marked not done by packing them
5586 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5587 &rm->rg_list, &recp_count);
/* Record the group count and copy the extraction words/masks into the
 * recipe management entry for later programming.
 */
5589 rm->n_grp_count += recp_count;
5590 rm->n_ext_words = lkup_exts->n_val_words;
5591 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5592 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5593 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5594 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5601 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5602 * @hw: pointer to hardware structure
5603 * @lkups: lookup elements or match criteria for the advanced recipe, one
5604 * structure per protocol header
5605 * @lkups_cnt: number of protocols
5606 * @bm: bitmap of field vectors to consider
5607 * @fv_list: pointer to a list that holds the returned field vectors
5609 static enum ice_status
5610 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5611 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5613 enum ice_status status;
/* Build a scratch array of HW protocol IDs, one per lookup element */
5617 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5619 return ICE_ERR_NO_MEMORY;
5621 for (i = 0; i < lkups_cnt; i++)
5622 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5623 status = ICE_ERR_CFG;
5627 /* Find field vectors that include all specified protocol types */
5628 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Scratch array is no longer needed once the FV list is built */
5631 ice_free(hw, prot_ids);
/* NOTE(review): interior source lines appear to be missing from this dump;
 * code below kept byte-identical, comments only.
 */
5636 * ice_add_special_words - Add words that are not protocols, such as metadata
5637 * @rinfo: other information regarding the rule e.g. priority and action info
5638 * @lkup_exts: lookup word structure
5640 static enum ice_status
5641 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5642 struct ice_prot_lkup_ext *lkup_exts)
5644 /* If this is a tunneled packet, then add recipe index to match the
5645 * tunnel bit in the packet metadata flags.
5647 if (rinfo->tun_type != ICE_NON_TUN) {
5648 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Consume the next free lookup-word slot for the metadata match */
5649 u8 word = lkup_exts->n_val_words++;
5651 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5652 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5654 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No free slot left to hold the extra metadata word */
5656 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): interior source lines appear to be missing from this dump
 * (return type line, `break`s, default case); code kept byte-identical.
 */
5663 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5664 * @hw: pointer to hardware structure
5665 * @rinfo: other information regarding the rule e.g. priority and action info
5666 * @bm: pointer to memory for returning the bitmap of field vectors
5669 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5672 enum ice_prof_type prof_type;
/* Map the rule's tunnel type onto a profile-type class, then fetch the
 * bitmap of field vectors belonging to that class.
 */
5674 switch (rinfo->tun_type) {
5676 prof_type = ICE_PROF_NON_TUN;
5678 case ICE_ALL_TUNNELS:
5679 prof_type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel flavors share one profile class */
5681 case ICE_SW_TUN_VXLAN_GPE:
5682 case ICE_SW_TUN_GENEVE:
5683 case ICE_SW_TUN_VXLAN:
5684 case ICE_SW_TUN_UDP:
5685 case ICE_SW_TUN_GTP:
5686 prof_type = ICE_PROF_TUN_UDP;
5688 case ICE_SW_TUN_NVGRE:
5689 prof_type = ICE_PROF_TUN_GRE;
5691 case ICE_SW_TUN_PPPOE:
5692 prof_type = ICE_PROF_TUN_PPPOE;
5694 case ICE_SW_TUN_AND_NON_TUN:
5696 prof_type = ICE_PROF_ALL;
5700 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): interior source lines appear to be missing from this dump
 * (error-path labels, brace closures, several if-bodies); code below kept
 * byte-identical, comments only.
 */
5704 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5705 * @hw: pointer to hardware structure
5706 * @lkups: lookup elements or match criteria for the advanced recipe, one
5707 * structure per protocol header
5708 * @lkups_cnt: number of protocols
5709 * @rinfo: other information regarding the rule e.g. priority and action info
5710 * @rid: return the recipe ID of the recipe created
5712 static enum ice_status
5713 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5714 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5716 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5717 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5718 struct ice_prot_lkup_ext *lkup_exts;
5719 struct ice_recp_grp_entry *r_entry;
5720 struct ice_sw_fv_list_entry *fvit;
5721 struct ice_recp_grp_entry *r_tmp;
5722 struct ice_sw_fv_list_entry *tmp;
5723 enum ice_status status = ICE_SUCCESS;
5724 struct ice_sw_recipe *rm;
5725 bool match_tun = false;
5729 return ICE_ERR_PARAM;
5731 lkup_exts = (struct ice_prot_lkup_ext *)
5732 ice_malloc(hw, sizeof(*lkup_exts));
5734 return ICE_ERR_NO_MEMORY;
5736 /* Determine the number of words to be matched and if it exceeds a
5737 * recipe's restrictions
5739 for (i = 0; i < lkups_cnt; i++) {
5742 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5743 status = ICE_ERR_CFG;
5744 goto err_free_lkup_exts;
/* Extract the valid (masked) words for this lookup into lkup_exts */
5747 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5749 status = ICE_ERR_CFG;
5750 goto err_free_lkup_exts;
5754 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5756 status = ICE_ERR_NO_MEMORY;
5757 goto err_free_lkup_exts;
5760 /* Get field vectors that contain fields extracted from all the protocol
5761 * headers being programmed.
5763 INIT_LIST_HEAD(&rm->fv_list);
5764 INIT_LIST_HEAD(&rm->rg_list);
5766 /* Get bitmap of field vectors (profiles) that are compatible with the
5767 * rule request; only these will be searched in the subsequent call to
5770 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5772 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5776 /* Group match words into recipes using preferred recipe grouping
5779 status = ice_create_recipe_group(hw, rm, lkup_exts);
5783 /* There is only profile for UDP tunnels. So, it is necessary to use a
5784 * metadata ID flag to differentiate different tunnel types. A separate
5785 * recipe needs to be used for the metadata.
5787 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5788 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5789 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5792 /* set the recipe priority if specified */
5793 rm->priority = (u8)rinfo->priority;
5795 /* Find offsets from the field vector. Pick the first one for all the
5798 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5802 /* get bitmap of all profiles the recipe will be associated with */
5803 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5804 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5806 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5807 ice_set_bit((u16)fvit->profile_id, profiles);
5810 /* Create any special protocol/offset pairs, such as looking at tunnel
5811 * bits by extracting metadata
5813 status = ice_add_special_words(rinfo, lkup_exts);
5815 goto err_free_lkup_exts;
5817 /* Look for a recipe which matches our requested fv / mask list */
5818 *rid = ice_find_recp(hw, lkup_exts);
5819 if (*rid < ICE_MAX_NUM_RECIPES)
5820 /* Success if found a recipe that match the existing criteria */
5823 /* Recipe we need does not exist, add a recipe */
5824 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5828 /* Associate all the recipes created with all the profiles in the
5829 * common field vector.
5831 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5833 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the existing profile->recipe association with the new recipes
 * before writing it back under the change lock.
 */
5836 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5837 (u8 *)r_bitmap, NULL);
5841 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5842 ICE_MAX_NUM_RECIPES);
5843 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5847 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5850 ice_release_change_lock(hw);
5855 /* Update profile to recipe bitmap array */
5856 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5857 ICE_MAX_NUM_RECIPES);
5859 /* Update recipe to profile bitmap array */
5860 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5861 if (ice_is_bit_set(r_bitmap, j))
5862 ice_set_bit((u16)fvit->profile_id,
5863 recipe_to_profile[j]);
5866 *rid = rm->root_rid;
/* Preserve the lookup extraction words with the cached recipe entry so
 * later ice_find_recp() calls can match against them.
 */
5867 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5868 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Teardown: release the temporary recipe-group and field-vector lists */
5870 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5871 ice_recp_grp_entry, l_entry) {
5872 LIST_DEL(&r_entry->l_entry);
5873 ice_free(hw, r_entry);
5876 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5878 LIST_DEL(&fvit->list_entry);
5883 ice_free(hw, rm->root_buf);
5888 ice_free(hw, lkup_exts);
/* NOTE(review): interior source lines appear to be missing from this dump
 * (returns, else-arms, brace closures); code kept byte-identical.
 */
5894 * ice_find_dummy_packet - find dummy packet by tunnel type
5896 * @lkups: lookup elements or match criteria for the advanced recipe, one
5897 * structure per protocol header
5898 * @lkups_cnt: number of protocols
5899 * @tun_type: tunnel type from the match criteria
5900 * @pkt: dummy packet to fill according to filter match criteria
5901 * @pkt_len: packet length of dummy packet
5902 * @offsets: pointer to receive the pointer to the offsets for the packet
5905 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5906 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5908 const struct ice_dummy_pkt_offsets **offsets)
5910 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* GTP and PPPoE have dedicated templates regardless of inner protocols */
5913 if (tun_type == ICE_SW_TUN_GTP) {
5914 *pkt = dummy_udp_gtp_packet;
5915 *pkt_len = sizeof(dummy_udp_gtp_packet);
5916 *offsets = dummy_udp_gtp_packet_offsets;
5919 if (tun_type == ICE_SW_TUN_PPPOE) {
5920 *pkt = dummy_pppoe_packet;
5921 *pkt_len = sizeof(dummy_pppoe_packet);
5922 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups to learn which L3/L4/VLAN headers the rule matches;
 * these flags pick the template below.
 */
5925 for (i = 0; i < lkups_cnt; i++) {
5926 if (lkups[i].type == ICE_UDP_ILOS)
5928 else if (lkups[i].type == ICE_TCP_IL)
5930 else if (lkups[i].type == ICE_IPV6_OFOS)
5932 else if (lkups[i].type == ICE_VLAN_OFOS)
5936 if (tun_type == ICE_ALL_TUNNELS) {
5937 *pkt = dummy_gre_udp_packet;
5938 *pkt_len = sizeof(dummy_gre_udp_packet);
5939 *offsets = dummy_gre_udp_packet_offsets;
/* NVGRE: inner TCP vs inner UDP template (selector line not visible) */
5943 if (tun_type == ICE_SW_TUN_NVGRE) {
5945 *pkt = dummy_gre_tcp_packet;
5946 *pkt_len = sizeof(dummy_gre_tcp_packet);
5947 *offsets = dummy_gre_tcp_packet_offsets;
5951 *pkt = dummy_gre_udp_packet;
5952 *pkt_len = sizeof(dummy_gre_udp_packet);
5953 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-encap tunnels: inner TCP vs inner UDP template */
5957 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5958 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5960 *pkt = dummy_udp_tun_tcp_packet;
5961 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5962 *offsets = dummy_udp_tun_tcp_packet_offsets;
5966 *pkt = dummy_udp_tun_udp_packet;
5967 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5968 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: choose among UDP/TCP x IPv4/IPv6 x VLAN templates */
5974 *pkt = dummy_vlan_udp_packet;
5975 *pkt_len = sizeof(dummy_vlan_udp_packet);
5976 *offsets = dummy_vlan_udp_packet_offsets;
5979 *pkt = dummy_udp_packet;
5980 *pkt_len = sizeof(dummy_udp_packet);
5981 *offsets = dummy_udp_packet_offsets;
5983 } else if (udp && ipv6) {
5985 *pkt = dummy_vlan_udp_ipv6_packet;
5986 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5987 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5990 *pkt = dummy_udp_ipv6_packet;
5991 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5992 *offsets = dummy_udp_ipv6_packet_offsets;
5994 } else if ((tcp && ipv6) || ipv6) {
5996 *pkt = dummy_vlan_tcp_ipv6_packet;
5997 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
5998 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6001 *pkt = dummy_tcp_ipv6_packet;
6002 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6003 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Fallback: plain (optionally VLAN-tagged) IPv4/TCP template */
6008 *pkt = dummy_vlan_tcp_packet;
6009 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6010 *offsets = dummy_vlan_tcp_packet_offsets;
6012 *pkt = dummy_tcp_packet;
6013 *pkt_len = sizeof(dummy_tcp_packet);
6014 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): interior source lines appear to be missing from this dump
 * (case labels, break statements, brace closures); code kept byte-identical.
 */
6019 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6021 * @lkups: lookup elements or match criteria for the advanced recipe, one
6022 * structure per protocol header
6023 * @lkups_cnt: number of protocols
6024 * @s_rule: stores rule information from the match criteria
6025 * @dummy_pkt: dummy packet to fill according to filter match criteria
6026 * @pkt_len: packet length of dummy packet
6027 * @offsets: offset info for the dummy packet
6029 static enum ice_status
6030 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6031 struct ice_aqc_sw_rules_elem *s_rule,
6032 const u8 *dummy_pkt, u16 pkt_len,
6033 const struct ice_dummy_pkt_offsets *offsets)
6038 /* Start with a packet with a pre-defined/dummy content. Then, fill
6039 * in the header values to be looked up or matched.
6041 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6043 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6045 for (i = 0; i < lkups_cnt; i++) {
6046 enum ice_protocol_type type;
6047 u16 offset = 0, len = 0, j;
6050 /* find the start of this layer; it should be found since this
6051 * was already checked when search for the dummy packet
6053 type = lkups[i].type;
6054 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6055 if (type == offsets[j].type) {
6056 offset = offsets[j].offset;
6061 /* this should never happen in a correct calling sequence */
6063 return ICE_ERR_PARAM;
/* Header length per protocol type (case labels not visible here) */
6065 switch (lkups[i].type) {
6068 len = sizeof(struct ice_ether_hdr);
6071 len = sizeof(struct ice_ethtype_hdr);
6074 len = sizeof(struct ice_vlan_hdr);
6078 len = sizeof(struct ice_ipv4_hdr);
6082 len = sizeof(struct ice_ipv6_hdr);
6087 len = sizeof(struct ice_l4_hdr);
6090 len = sizeof(struct ice_sctp_hdr);
6093 len = sizeof(struct ice_nvgre);
6098 len = sizeof(struct ice_udp_tnl_hdr);
6102 len = sizeof(struct ice_udp_gtp_hdr);
6105 len = sizeof(struct ice_pppoe_hdr);
6108 return ICE_ERR_PARAM;
6111 /* the length should be a word multiple */
6112 if (len % ICE_BYTES_PER_WORD)
6115 /* We have the offset to the header start, the length, the
6116 * caller's header values and mask. Use this information to
6117 * copy the data into the dummy packet appropriately based on
6118 * the mask. Note that we need to only write the bits as
6119 * indicated by the mask to make sure we don't improperly write
6120 * over any significant packet data.
6122 for (j = 0; j < len / sizeof(u16); j++)
6123 if (((u16 *)&lkups[i].m_u)[j])
6124 ((u16 *)(pkt + offset))[j] =
6125 (((u16 *)(pkt + offset))[j] &
6126 ~((u16 *)&lkups[i].m_u)[j]) |
6127 (((u16 *)&lkups[i].h_u)[j] &
6128 ((u16 *)&lkups[i].m_u)[j]);
6131 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): interior source lines appear to be missing from this dump
 * (switch opener, breaks, returns); code kept byte-identical.
 */
6137 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6138 * @hw: pointer to the hardware structure
6139 * @tun_type: tunnel type
6140 * @pkt: dummy packet to fill in
6141 * @offsets: offset info for the dummy packet
6143 static enum ice_status
6144 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6145 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the currently-open tunnel UDP port for this tunnel class */
6150 case ICE_SW_TUN_AND_NON_TUN:
6151 case ICE_SW_TUN_VXLAN_GPE:
6152 case ICE_SW_TUN_VXLAN:
6153 case ICE_SW_TUN_UDP:
6154 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6158 case ICE_SW_TUN_GENEVE:
6159 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6164 /* Nothing needs to be done for this tunnel type */
6168 /* Find the outer UDP protocol header and insert the port number */
6169 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6170 if (offsets[i].type == ICE_UDP_OF) {
6171 struct ice_l4_hdr *hdr;
6174 offset = offsets[i].offset;
6175 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel port goes into the outer UDP destination port (big endian) */
6176 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): interior source lines appear to be missing from this dump;
 * code kept byte-identical, comments only.
 */
6186 * ice_find_adv_rule_entry - Search a rule entry
6187 * @hw: pointer to the hardware structure
6188 * @lkups: lookup elements or match criteria for the advanced recipe, one
6189 * structure per protocol header
6190 * @lkups_cnt: number of protocols
6191 * @recp_id: recipe ID for which we are finding the rule
6192 * @rinfo: other information regarding the rule e.g. priority and action info
6194 * Helper function to search for a given advance rule entry
6195 * Returns pointer to entry storing the rule if found
6197 static struct ice_adv_fltr_mgmt_list_entry *
6198 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6199 u16 lkups_cnt, u16 recp_id,
6200 struct ice_adv_rule_info *rinfo)
6202 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6203 struct ice_switch_info *sw = hw->switch_info;
/* Walk only the filter rules already cached for this recipe ID */
6206 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6207 ice_adv_fltr_mgmt_list_entry, list_entry) {
6208 bool lkups_matched = true;
6210 if (lkups_cnt != list_itr->lkups_cnt)
/* Lookup arrays must match element-for-element (memcmp per entry) */
6212 for (i = 0; i < list_itr->lkups_cnt; i++)
6213 if (memcmp(&list_itr->lkups[i], &lkups[i],
6215 lkups_matched = false;
/* Flag and tunnel type must also agree for the entry to count as a hit */
6218 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6219 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): interior source lines appear to be missing from this dump;
 * code kept byte-identical, comments only.
 */
6227 * ice_adv_add_update_vsi_list
6228 * @hw: pointer to the hardware structure
6229 * @m_entry: pointer to current adv filter management list entry
6230 * @cur_fltr: filter information from the book keeping entry
6231 * @new_fltr: filter information with the new VSI to be added
6233 * Call AQ command to add or update previously created VSI list with new VSI.
6235 * Helper function to do book keeping associated with adding filter information
6236 * The algorithm to do the booking keeping is described below :
6237 * When a VSI needs to subscribe to a given advanced filter
6238 * if only one VSI has been added till now
6239 * Allocate a new VSI list and add two VSIs
6240 * to this list using switch rule command
6241 * Update the previously created switch rule with the
6242 * newly created VSI list ID
6243 * if a VSI list was previously created
6244 * Add the new VSI to the previously created VSI list set
6245 * using the update switch rule command
6247 static enum ice_status
6248 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6249 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6250 struct ice_adv_rule_info *cur_fltr,
6251 struct ice_adv_rule_info *new_fltr)
6253 enum ice_status status;
6254 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be folded into a VSI list */
6256 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6257 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6258 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6259 return ICE_ERR_NOT_IMPL;
6261 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6262 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6263 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6264 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6265 return ICE_ERR_NOT_IMPL;
6267 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6268 /* Only one entry existed in the mapping and it was not already
6269 * a part of a VSI list. So, create a VSI list with the old and
6272 struct ice_fltr_info tmp_fltr;
6273 u16 vsi_handle_arr[2];
6275 /* A rule already exists with the new VSI being added */
6276 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6277 new_fltr->sw_act.fwd_id.hw_vsi_id)
6278 return ICE_ERR_ALREADY_EXISTS;
6280 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6281 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6282 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6288 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6289 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6290 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6291 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6292 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6294 /* Update the previous switch rule of "forward to VSI" to
6297 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Record the new forwarding target in the book-keeping entry */
6301 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6302 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6303 m_entry->vsi_list_info =
6304 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6307 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6309 if (!m_entry->vsi_list_info)
6312 /* A rule already exists with the new VSI being added */
6313 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6316 /* Update the previously created VSI list set with
6317 * the new VSI ID passed in
6319 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6321 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6323 ice_aqc_opc_update_sw_rules,
6325 /* update VSI list mapping info with new VSI ID */
6327 ice_set_bit(vsi_handle,
6328 m_entry->vsi_list_info->vsi_map);
6331 m_entry->vsi_count++;
/* NOTE(review): interior source lines appear to be missing from this dump
 * (breaks, early returns, brace closures); code kept byte-identical.
 */
6336 * ice_add_adv_rule - helper function to create an advanced switch rule
6337 * @hw: pointer to the hardware structure
6338 * @lkups: information on the words that needs to be looked up. All words
6339 * together makes one recipe
6340 * @lkups_cnt: num of entries in the lkups array
6341 * @rinfo: other information related to the rule that needs to be programmed
6342 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6343 * ignored is case of error.
6345 * This function can program only 1 rule at a time. The lkups is used to
6346 * describe the all the words that forms the "lookup" portion of the recipe.
6347 * These words can span multiple protocols. Callers to this function need to
6348 * pass in a list of protocol headers with lookup information along and mask
6349 * that determines which words are valid from the given protocol header.
6350 * rinfo describes other information related to this rule such as forwarding
6351 * IDs, priority of this rule, etc.
6354 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6355 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6356 struct ice_rule_query_data *added_entry)
6358 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6359 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6360 const struct ice_dummy_pkt_offsets *pkt_offsets;
6361 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6362 struct LIST_HEAD_TYPE *rule_head;
6363 struct ice_switch_info *sw;
6364 enum ice_status status;
6365 const u8 *pkt = NULL;
6370 /* Initialize profile to result index bitmap */
6371 if (!hw->switch_info->prof_res_bm_init) {
6372 hw->switch_info->prof_res_bm_init = 1;
6373 ice_init_prof_result_bm(hw);
6377 return ICE_ERR_PARAM;
6379 /* get # of words we need to match */
/* Count the non-zero 16-bit mask words across all lookups */
6381 for (i = 0; i < lkups_cnt; i++) {
6384 ptr = (u16 *)&lkups[i].m_u;
6385 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6389 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6390 return ICE_ERR_PARAM;
6392 /* make sure that we can locate a dummy packet */
6393 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6396 status = ICE_ERR_PARAM;
6397 goto err_ice_add_adv_rule;
6400 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6401 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6402 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6403 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6406 vsi_handle = rinfo->sw_act.vsi_handle;
6407 if (!ice_is_vsi_valid(hw, vsi_handle))
6408 return ICE_ERR_PARAM;
6410 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6411 rinfo->sw_act.fwd_id.hw_vsi_id =
6412 ice_get_hw_vsi_num(hw, vsi_handle);
6413 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6414 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or find) the recipe needed by this rule, then check whether
 * an identical rule already exists for it.
 */
6416 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6419 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6421 /* we have to add VSI to VSI_LIST and increment vsi_count.
6422 * Also Update VSI list so that we can change forwarding rule
6423 * if the rule already exists, we will check if it exists with
6424 * same vsi_id, if not then add it to the VSI list if it already
6425 * exists if not then create a VSI list and add the existing VSI
6426 * ID and the new VSI ID to the list
6427 * We will add that VSI to the list
6429 status = ice_adv_add_update_vsi_list(hw, m_entry,
6430 &m_entry->rule_info,
6433 added_entry->rid = rid;
6434 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6435 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: build an AQ switch-rule element sized for the dummy packet */
6439 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6440 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6442 return ICE_ERR_NO_MEMORY;
6443 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the single action word for the chosen filter action */
6444 switch (rinfo->sw_act.fltr_act) {
6445 case ICE_FWD_TO_VSI:
6446 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6447 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6448 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6451 act |= ICE_SINGLE_ACT_TO_Q;
6452 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6453 ICE_SINGLE_ACT_Q_INDEX_M;
6455 case ICE_FWD_TO_QGRP:
6456 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6457 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6458 act |= ICE_SINGLE_ACT_TO_Q;
6459 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6460 ICE_SINGLE_ACT_Q_INDEX_M;
6461 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6462 ICE_SINGLE_ACT_Q_REGION_M;
6464 case ICE_DROP_PACKET:
6465 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6466 ICE_SINGLE_ACT_VALID_BIT;
6469 status = ICE_ERR_CFG;
6470 goto err_ice_add_adv_rule;
6473 /* set the rule LOOKUP type based on caller specified 'RX'
6474 * instead of hardcoding it to be either LOOKUP_TX/RX
6476 * for 'RX' set the source to be the port number
6477 * for 'TX' set the source to be the source HW VSI number (determined
6481 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6482 s_rule->pdata.lkup_tx_rx.src =
6483 CPU_TO_LE16(hw->port_info->lport);
6485 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6486 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6489 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6490 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6492 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6493 pkt_len, pkt_offsets);
6495 goto err_ice_add_adv_rule;
6497 if (rinfo->tun_type != ICE_NON_TUN &&
6498 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6499 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6500 s_rule->pdata.lkup_tx_rx.hdr,
6503 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue */
6506 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6507 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6510 goto err_ice_add_adv_rule;
6511 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6512 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6514 status = ICE_ERR_NO_MEMORY;
6515 goto err_ice_add_adv_rule;
6518 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6519 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6520 ICE_NONDMA_TO_NONDMA);
6521 if (!adv_fltr->lkups) {
6522 status = ICE_ERR_NO_MEMORY;
6523 goto err_ice_add_adv_rule;
6526 adv_fltr->lkups_cnt = lkups_cnt;
6527 adv_fltr->rule_info = *rinfo;
6528 adv_fltr->rule_info.fltr_rule_id =
6529 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6530 sw = hw->switch_info;
6531 sw->recp_list[rid].adv_rule = true;
6532 rule_head = &sw->recp_list[rid].filt_rules;
6534 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6535 struct ice_fltr_info tmp_fltr;
6537 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6538 tmp_fltr.fltr_rule_id =
6539 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6540 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6541 tmp_fltr.fwd_id.hw_vsi_id =
6542 ice_get_hw_vsi_num(hw, vsi_handle);
6543 tmp_fltr.vsi_handle = vsi_handle;
6544 /* Update the previous switch rule of "forward to VSI" to
6547 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6549 goto err_ice_add_adv_rule;
6550 adv_fltr->vsi_count = 1;
6553 /* Add rule entry to book keeping list */
6554 LIST_ADD(&adv_fltr->list_entry, rule_head);
6556 added_entry->rid = rid;
6557 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6558 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: release partially-built book-keeping and the AQ buffer */
6560 err_ice_add_adv_rule:
6561 if (status && adv_fltr) {
6562 ice_free(hw, adv_fltr->lkups);
6563 ice_free(hw, adv_fltr);
6566 ice_free(hw, s_rule);
/* NOTE(review): interior source lines appear to be missing from this dump;
 * code kept byte-identical, comments only.
 */
6572 * ice_adv_rem_update_vsi_list
6573 * @hw: pointer to the hardware structure
6574 * @vsi_handle: VSI handle of the VSI to remove
6575 * @fm_list: filter management entry for which the VSI list management needs to
6578 static enum ice_status
6579 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6580 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6582 struct ice_vsi_list_map_info *vsi_list_info;
6583 enum ice_sw_lkup_type lkup_type;
6584 enum ice_status status;
/* Only valid on rules that currently forward to a non-empty VSI list */
6587 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6588 fm_list->vsi_count == 0)
6589 return ICE_ERR_PARAM;
6591 /* A rule with the VSI being removed does not exist */
6592 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6593 return ICE_ERR_DOES_NOT_EXIST;
6595 lkup_type = ICE_SW_LKUP_LAST;
6596 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove flag = true) */
6597 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6598 ice_aqc_opc_update_sw_rules,
6603 fm_list->vsi_count--;
6604 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6605 vsi_list_info = fm_list->vsi_list_info;
/* When one VSI remains, collapse the list back to a direct FWD_TO_VSI */
6606 if (fm_list->vsi_count == 1) {
6607 struct ice_fltr_info tmp_fltr;
6610 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6612 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6613 return ICE_ERR_OUT_OF_RANGE;
6615 /* Make sure VSI list is empty before removing it below */
6616 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6618 ice_aqc_opc_update_sw_rules,
6623 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6624 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6625 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6626 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6627 tmp_fltr.fwd_id.hw_vsi_id =
6628 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6629 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6630 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6632 /* Update the previous switch rule of "MAC forward to VSI" to
6633 * "MAC fwd to VSI list"
6635 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6637 ice_debug(hw, ICE_DBG_SW,
6638 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6639 tmp_fltr.fwd_id.hw_vsi_id, status);
6643 /* Remove the VSI list since it is no longer used */
6644 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6646 ice_debug(hw, ICE_DBG_SW,
6647 "Failed to remove VSI list %d, error %d\n",
6648 vsi_list_id, status);
6652 LIST_DEL(&vsi_list_info->list_entry);
6653 ice_free(hw, vsi_list_info);
6654 fm_list->vsi_list_info = NULL;
/* NOTE(review): interior source lines appear to be missing from this dump;
 * code kept byte-identical, comments only.
 */
6661 * ice_rem_adv_rule - removes existing advanced switch rule
6662 * @hw: pointer to the hardware structure
6663 * @lkups: information on the words that needs to be looked up. All words
6664 * together makes one recipe
6665 * @lkups_cnt: num of entries in the lkups array
6666 * @rinfo: Its the pointer to the rule information for the rule
6668 * This function can be used to remove 1 rule at a time. The lkups is
6669 * used to describe all the words that forms the "lookup" portion of the
6670 * rule. These words can span multiple protocols. Callers to this function
6671 * need to pass in a list of protocol headers with lookup information along
6672 * and mask that determines which words are valid from the given protocol
6673 * header. rinfo describes other information related to this rule such as
6674 * forwarding IDs, priority of this rule, etc.
6677 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6678 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6680 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6681 struct ice_prot_lkup_ext lkup_exts;
6682 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6683 enum ice_status status = ICE_SUCCESS;
6684 bool remove_rule = false;
6685 u16 i, rid, vsi_handle;
/* Rebuild the lookup-word set from the caller's criteria so the recipe
 * used by the rule can be located via ice_find_recp().
 */
6687 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6688 for (i = 0; i < lkups_cnt; i++) {
6691 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6694 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6699 /* Create any special protocol/offset pairs, such as looking at tunnel
6700 * bits by extracting metadata
6702 status = ice_add_special_words(rinfo, &lkup_exts);
6706 rid = ice_find_recp(hw, &lkup_exts);
6707 /* If did not find a recipe that match the existing criteria */
6708 if (rid == ICE_MAX_NUM_RECIPES)
6709 return ICE_ERR_PARAM;
6711 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6712 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6713 /* the rule is already removed */
6716 ice_acquire_lock(rule_lock);
/* Decide between unsubscribing one VSI and removing the whole rule */
6717 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6719 } else if (list_elem->vsi_count > 1) {
6720 list_elem->vsi_list_info->ref_cnt--;
6721 remove_rule = false;
6722 vsi_handle = rinfo->sw_act.vsi_handle;
6723 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6725 vsi_handle = rinfo->sw_act.vsi_handle;
6726 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6728 ice_release_lock(rule_lock);
6731 if (list_elem->vsi_count == 0)
6734 ice_release_lock(rule_lock);
/* Full removal: send a remove-switch-rule AQ command, then drop the
 * book-keeping entry under the rule lock.
 */
6736 struct ice_aqc_sw_rules_elem *s_rule;
6739 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6741 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6744 return ICE_ERR_NO_MEMORY;
6745 s_rule->pdata.lkup_tx_rx.act = 0;
6746 s_rule->pdata.lkup_tx_rx.index =
6747 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6748 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6749 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6751 ice_aqc_opc_remove_sw_rules, NULL);
6752 if (status == ICE_SUCCESS) {
6753 ice_acquire_lock(rule_lock);
6754 LIST_DEL(&list_elem->list_entry);
6755 ice_free(hw, list_elem->lkups);
6756 ice_free(hw, list_elem);
6757 ice_release_lock(rule_lock);
6759 ice_free(hw, s_rule);
6765 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6766 * @hw: pointer to the hardware structure
6767 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6769 * This function is used to remove 1 rule at a time. The removal is based on
6770 * the remove_entry parameter. This function will remove rule for a given
6771 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6774 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6775 struct ice_rule_query_data *remove_entry)
6777 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6778 struct LIST_HEAD_TYPE *list_head;
6779 struct ice_adv_rule_info rinfo;
6780 struct ice_switch_info *sw;
6782 sw = hw->switch_info;
6783 if (!sw->recp_list[remove_entry->rid].recp_created)
6784 return ICE_ERR_PARAM;
6785 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6786 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6788 if (list_itr->rule_info.fltr_rule_id ==
6789 remove_entry->rule_id) {
6790 rinfo = list_itr->rule_info;
6791 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6792 return ice_rem_adv_rule(hw, list_itr->lkups,
6793 list_itr->lkups_cnt, &rinfo);
6796 return ICE_ERR_PARAM;
6800 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6802 * @hw: pointer to the hardware structure
6803 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6805 * This function is used to remove all the rules for a given VSI and as soon
6806 * as removing a rule fails, it will return immediately with the error code,
6807 * else it will return ICE_SUCCESS
6810 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6812 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6813 struct ice_vsi_list_map_info *map_info;
6814 struct LIST_HEAD_TYPE *list_head;
6815 struct ice_adv_rule_info rinfo;
6816 struct ice_switch_info *sw;
6817 enum ice_status status;
6818 u16 vsi_list_id = 0;
6821 sw = hw->switch_info;
6822 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6823 if (!sw->recp_list[rid].recp_created)
6825 if (!sw->recp_list[rid].adv_rule)
6827 list_head = &sw->recp_list[rid].filt_rules;
6829 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6830 ice_adv_fltr_mgmt_list_entry, list_entry) {
6831 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
6836 rinfo = list_itr->rule_info;
6837 rinfo.sw_act.vsi_handle = vsi_handle;
6838 status = ice_rem_adv_rule(hw, list_itr->lkups,
6839 list_itr->lkups_cnt, &rinfo);
6849 * ice_replay_fltr - Replay all the filters stored by a specific list head
6850 * @hw: pointer to the hardware structure
6851 * @list_head: list for which filters needs to be replayed
6852 * @recp_id: Recipe ID for which rules need to be replayed
6854 static enum ice_status
6855 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6857 struct ice_fltr_mgmt_list_entry *itr;
6858 enum ice_status status = ICE_SUCCESS;
6859 struct ice_sw_recipe *recp_list;
6860 u8 lport = hw->port_info->lport;
6861 struct LIST_HEAD_TYPE l_head;
6863 if (LIST_EMPTY(list_head))
6866 recp_list = &hw->switch_info->recp_list[recp_id];
6867 /* Move entries from the given list_head to a temporary l_head so that
6868 * they can be replayed. Otherwise when trying to re-add the same
6869 * filter, the function will return already exists
6871 LIST_REPLACE_INIT(list_head, &l_head);
6873 /* Mark the given list_head empty by reinitializing it so filters
6874 * could be added again by *handler
6876 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6878 struct ice_fltr_list_entry f_entry;
6880 f_entry.fltr_info = itr->fltr_info;
6881 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6882 status = ice_add_rule_internal(hw, recp_list, lport,
6884 if (status != ICE_SUCCESS)
6889 /* Add a filter per VSI separately */
6894 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6896 if (!ice_is_vsi_valid(hw, vsi_handle))
6899 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6900 f_entry.fltr_info.vsi_handle = vsi_handle;
6901 f_entry.fltr_info.fwd_id.hw_vsi_id =
6902 ice_get_hw_vsi_num(hw, vsi_handle);
6903 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6904 if (recp_id == ICE_SW_LKUP_VLAN)
6905 status = ice_add_vlan_internal(hw, recp_list,
6908 status = ice_add_rule_internal(hw, recp_list,
6911 if (status != ICE_SUCCESS)
6916 /* Clear the filter management list */
6917 ice_rem_sw_rule_info(hw, &l_head);
6922 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6923 * @hw: pointer to the hardware structure
6925 * NOTE: This function does not clean up partially added filters on error.
6926 * It is up to caller of the function to issue a reset or fail early.
6928 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6930 struct ice_switch_info *sw = hw->switch_info;
6931 enum ice_status status = ICE_SUCCESS;
6934 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6935 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6937 status = ice_replay_fltr(hw, i, head);
6938 if (status != ICE_SUCCESS)
6945 * ice_replay_vsi_fltr - Replay filters for requested VSI
6946 * @hw: pointer to the hardware structure
6947 * @vsi_handle: driver VSI handle
6948 * @recp_id: Recipe ID for which rules need to be replayed
6949 * @list_head: list for which filters need to be replayed
6951 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6952 * It is required to pass valid VSI handle.
6954 static enum ice_status
6955 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6956 struct LIST_HEAD_TYPE *list_head)
6958 struct ice_fltr_mgmt_list_entry *itr;
6959 enum ice_status status = ICE_SUCCESS;
6960 struct ice_sw_recipe *recp_list;
6963 if (LIST_EMPTY(list_head))
6965 recp_list = &hw->switch_info->recp_list[recp_id];
6966 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6968 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6970 struct ice_fltr_list_entry f_entry;
6972 f_entry.fltr_info = itr->fltr_info;
6973 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6974 itr->fltr_info.vsi_handle == vsi_handle) {
6975 /* update the src in case it is VSI num */
6976 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6977 f_entry.fltr_info.src = hw_vsi_id;
6978 status = ice_add_rule_internal(hw, recp_list,
6979 hw->port_info->lport,
6981 if (status != ICE_SUCCESS)
6985 if (!itr->vsi_list_info ||
6986 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6988 /* Clearing it so that the logic can add it back */
6989 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6990 f_entry.fltr_info.vsi_handle = vsi_handle;
6991 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6992 /* update the src in case it is VSI num */
6993 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6994 f_entry.fltr_info.src = hw_vsi_id;
6995 if (recp_id == ICE_SW_LKUP_VLAN)
6996 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
6998 status = ice_add_rule_internal(hw, recp_list,
6999 hw->port_info->lport,
7001 if (status != ICE_SUCCESS)
7009 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7010 * @hw: pointer to the hardware structure
7011 * @vsi_handle: driver VSI handle
7012 * @list_head: list for which filters need to be replayed
7014 * Replay the advanced rule for the given VSI.
7016 static enum ice_status
7017 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7018 struct LIST_HEAD_TYPE *list_head)
7020 struct ice_rule_query_data added_entry = { 0 };
7021 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7022 enum ice_status status = ICE_SUCCESS;
7024 if (LIST_EMPTY(list_head))
7026 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7028 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7029 u16 lk_cnt = adv_fltr->lkups_cnt;
7031 if (vsi_handle != rinfo->sw_act.vsi_handle)
7033 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7042 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7043 * @hw: pointer to the hardware structure
7044 * @vsi_handle: driver VSI handle
7046 * Replays filters for requested VSI via vsi_handle.
7048 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7050 struct ice_switch_info *sw = hw->switch_info;
7051 enum ice_status status;
7054 /* Update the recipes that were created */
7055 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7056 struct LIST_HEAD_TYPE *head;
7058 head = &sw->recp_list[i].filt_replay_rules;
7059 if (!sw->recp_list[i].adv_rule)
7060 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7062 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7063 if (status != ICE_SUCCESS)
7071 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7072 * @hw: pointer to the HW struct
7074 * Deletes the filter replay rules.
7076 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7078 struct ice_switch_info *sw = hw->switch_info;
7084 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7085 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7086 struct LIST_HEAD_TYPE *l_head;
7088 l_head = &sw->recp_list[i].filt_replay_rules;
7089 if (!sw->recp_list[i].adv_rule)
7090 ice_rem_sw_rule_info(hw, l_head);
7092 ice_rem_adv_rule_info(hw, l_head);