1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header below: destination MAC at 0,
 * EtherType at 12, VLAN TCI at 14. ICE_MAX_VLAN_ID is the largest 12-bit
 * VLAN identifier (0xFFF).
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* 16-byte dummy L2 header used as the packet template for switch rules;
 * byte 0 = 0x2 marks the DA as a locally administered MAC (see the
 * "Word on Hardcoded values" comment above).
 * NOTE(review): the initializer continues on lines not shown in this view.
 */
29 #define DUMMY_ETH_HDR_LEN 16
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Sizes (in bytes) of the AQ switch-rule buffers. Each is computed as the
 * ice_aqc_sw_rules_elem header without its flexible 'pdata' member, plus the
 * specific rule payload. The trailing "- 1" discounts the one-byte flexible
 * array member already counted inside the rule struct.
 */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule carrying (n) 'act' entries */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule carrying (n) VSI entries */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* One (protocol, byte-offset) pair describing where a protocol header starts
 * inside a dummy packet template; arrays of these are terminated by an entry
 * with type ICE_PROTOCOL_LAST.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4 + TCP dummy packet
 * (some entries fall on lines not shown in this view)
 */
60 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
63 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet template: outer MAC/IPv4 (proto 0x2F = GRE), NVGRE key,
 * inner MAC/IPv4 (proto 0x06 = TCP), TCP header
 */
71 static const u8 dummy_gre_tcp_packet[] = {
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
76 0x08, 0x00, /* ICE_ETYPE_OL 12 */
78 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x2F, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
84 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
85 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
88 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00,
92 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
93 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x06, 0x00, 0x00,
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x50, 0x02, 0x20, 0x00,
102 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + NVGRE + inner MAC/IPv4 + UDP dummy packet */
105 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
107 { ICE_ETYPE_OL, 12 },
108 { ICE_IPV4_OFOS, 14 },
112 { ICE_UDP_ILOS, 76 },
113 { ICE_PROTOCOL_LAST, 0 },
/* Same layout as dummy_gre_tcp_packet but the inner IPv4 carries UDP
 * (proto 0x11) instead of TCP
 */
116 static const u8 dummy_gre_udp_packet[] = {
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
121 0x08, 0x00, /* ICE_ETYPE_OL 12 */
123 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x2F, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
129 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
130 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
137 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x11, 0x00, 0x00,
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
144 0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP + VXLAN/VXLAN-GPE + inner MAC/IPv4 + TCP
 * dummy packet
 */
147 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
154 { ICE_VXLAN_GPE, 42 },
158 { ICE_PROTOCOL_LAST, 0 },
/* UDP tunnel template: outer UDP destination port 0x12b5 (4789 = VXLAN),
 * inner IPv4 carries TCP
 */
161 static const u8 dummy_udp_tun_tcp_packet[] = {
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x08, 0x00, /* ICE_ETYPE_OL 12 */
168 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x11, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
175 0x00, 0x46, 0x00, 0x00,
177 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
181 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
185 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
186 0x00, 0x01, 0x00, 0x00,
187 0x40, 0x06, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x50, 0x02, 0x20, 0x00,
195 0x00, 0x00, 0x00, 0x00
/* offset info for MAC + IPv4 + UDP + VXLAN/VXLAN-GPE + inner MAC/IPv4 + UDP
 * dummy packet
 */
198 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
200 { ICE_ETYPE_OL, 12 },
201 { ICE_IPV4_OFOS, 14 },
205 { ICE_VXLAN_GPE, 42 },
208 { ICE_UDP_ILOS, 84 },
209 { ICE_PROTOCOL_LAST, 0 },
/* Same layout as dummy_udp_tun_tcp_packet but inner IPv4 carries UDP */
212 static const u8 dummy_udp_tun_udp_packet[] = {
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x08, 0x00, /* ICE_ETYPE_OL 12 */
219 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
226 0x00, 0x3a, 0x00, 0x00,
228 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
236 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
237 0x00, 0x01, 0x00, 0x00,
238 0x00, 0x11, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
243 0x00, 0x08, 0x00, 0x00,
246 /* offset info for MAC + IPv4 + UDP dummy packet */
247 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
249 { ICE_ETYPE_OL, 12 },
250 { ICE_IPV4_OFOS, 14 },
251 { ICE_UDP_ILOS, 34 },
252 { ICE_PROTOCOL_LAST, 0 },
255 /* Dummy packet for MAC + IPv4 + UDP */
256 static const u8 dummy_udp_packet[] = {
257 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00,
261 0x08, 0x00, /* ICE_ETYPE_OL 12 */
263 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
264 0x00, 0x01, 0x00, 0x00,
265 0x00, 0x11, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
270 0x00, 0x08, 0x00, 0x00,
272 0x00, 0x00, /* 2 bytes for 4 byte alignment */
275 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
276 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
278 { ICE_ETYPE_OL, 12 },
279 { ICE_VLAN_OFOS, 14 },
280 { ICE_IPV4_OFOS, 18 },
281 { ICE_UDP_ILOS, 38 },
282 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet; outer EtherType 0x8100 marks the
 * VLAN tag
 */
286 static const u8 dummy_vlan_udp_packet[] = {
287 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
288 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00,
291 0x81, 0x00, /* ICE_ETYPE_OL 12 */
293 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
295 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
296 0x00, 0x01, 0x00, 0x00,
297 0x00, 0x11, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
302 0x00, 0x08, 0x00, 0x00,
304 0x00, 0x00, /* 2 bytes for 4 byte alignment */
307 /* offset info for MAC + IPv4 + TCP dummy packet */
308 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
310 { ICE_ETYPE_OL, 12 },
311 { ICE_IPV4_OFOS, 14 },
313 { ICE_PROTOCOL_LAST, 0 },
316 /* Dummy packet for MAC + IPv4 + TCP */
317 static const u8 dummy_tcp_packet[] = {
318 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
319 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00,
322 0x08, 0x00, /* ICE_ETYPE_OL 12 */
324 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
325 0x00, 0x01, 0x00, 0x00,
326 0x00, 0x06, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x50, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, /* 2 bytes for 4 byte alignment */
339 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
340 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_VLAN_OFOS, 14 },
344 { ICE_IPV4_OFOS, 18 },
346 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
350 static const u8 dummy_vlan_tcp_packet[] = {
351 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
352 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x81, 0x00, /* ICE_ETYPE_OL 12 */
357 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
359 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
360 0x00, 0x01, 0x00, 0x00,
361 0x00, 0x06, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
368 0x50, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + TCP dummy packet */
374 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
376 { ICE_ETYPE_OL, 12 },
377 { ICE_IPV6_OFOS, 14 },
379 { ICE_PROTOCOL_LAST, 0 },
/* IPv6 + TCP dummy packet; EtherType 0x86DD = IPv6 */
382 static const u8 dummy_tcp_ipv6_packet[] = {
383 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
389 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (offsets table says 14; "40" here was the IPv6 header length, not the offset) */
390 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
391 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x50, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, /* 2 bytes for 4 byte alignment */
409 /* C-tag (802.1Q): IPv6 + TCP */
410 static const struct ice_dummy_pkt_offsets
411 dummy_vlan_tcp_ipv6_packet_offsets[] = {
413 { ICE_ETYPE_OL, 12 },
414 { ICE_VLAN_OFOS, 14 },
415 { ICE_IPV6_OFOS, 18 },
417 { ICE_PROTOCOL_LAST, 0 },
420 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
421 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x81, 0x00, /* ICE_ETYPE_OL 12 */
428 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
430 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
431 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x50, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv6 + UDP dummy packet */
451 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
453 { ICE_ETYPE_OL, 12 },
454 { ICE_IPV6_OFOS, 14 },
455 { ICE_UDP_ILOS, 54 },
456 { ICE_PROTOCOL_LAST, 0 },
459 /* IPv6 + UDP dummy packet */
460 static const u8 dummy_udp_ipv6_packet[] = {
461 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
462 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00,
465 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
467 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 (offsets table says 14; "40" here was the IPv6 header length, not the offset) */
468 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
469 0x00, 0x00, 0x00, 0x00,
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
479 0x00, 0x08, 0x00, 0x00,
481 0x00, 0x00, /* 2 bytes for 4 byte alignment */
484 /* C-tag (802.1Q): IPv6 + UDP */
485 static const struct ice_dummy_pkt_offsets
486 dummy_vlan_udp_ipv6_packet_offsets[] = {
488 { ICE_ETYPE_OL, 12 },
489 { ICE_VLAN_OFOS, 14 },
490 { ICE_IPV6_OFOS, 18 },
491 { ICE_UDP_ILOS, 58 },
492 { ICE_PROTOCOL_LAST, 0 },
495 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
496 static const u8 dummy_vlan_udp_ipv6_packet[] = {
497 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x81, 0x00, /* ICE_ETYPE_OL 12 */
503 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
505 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
506 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
517 0x00, 0x08, 0x00, 0x00,
519 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + UDP + GTP-U dummy packet */
522 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
524 { ICE_IPV4_OFOS, 14 },
527 { ICE_PROTOCOL_LAST, 0 },
/* GTP-U template: UDP destination port 0x0868 (2152 = GTP-U), GTP header
 * with PDU-session extension (next ext type 0x85)
 */
530 static const u8 dummy_udp_gtp_packet[] = {
531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
532 0x00, 0x00, 0x00, 0x00,
533 0x00, 0x00, 0x00, 0x00,
536 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
537 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x11, 0x00, 0x00,
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
543 0x00, 0x1c, 0x00, 0x00,
545 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
546 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x85,
549 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
550 0x00, 0x00, 0x00, 0x00,
/* offset info for MAC + VLAN + PPPoE dummy packet */
553 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
555 { ICE_ETYPE_OL, 12 },
556 { ICE_VLAN_OFOS, 14},
558 { ICE_PROTOCOL_LAST, 0 },
/* PPPoE session template: VLAN EtherType 0x8864 (PPPoE session),
 * PPP protocol 0x0021 = IPv4
 */
561 static const u8 dummy_pppoe_packet[] = {
562 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x00, 0x00, 0x00,
566 0x81, 0x00, /* ICE_ETYPE_OL 12 */
568 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
570 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
573 0x00, 0x21, /* PPP Link Layer 24 */
575 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
584 /* this is a recipe to profile association bitmap */
585 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
586 ICE_MAX_NUM_PROFILES);
588 /* this is a profile to recipe association bitmap */
589 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
590 ICE_MAX_NUM_RECIPES);
/* Forward declaration: refreshes the two association bitmaps above from FW */
592 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
595 * ice_collect_result_idx - copy result index values
596 * @buf: buffer that contains the result index
597 * @recp: the recipe struct to copy data into
 *
 * If the RESULT_EN flag is set in the recipe's result_indx field, record the
 * raw index (flag bit masked off) in the recipe's res_idxs bitmap.
599 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
600 struct ice_sw_recipe *recp)
602 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
603 ice_set_bit(buf->content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
608 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
609 * @hw: pointer to hardware structure
610 * @recps: struct that we need to populate
611 * @rid: recipe ID that we are populating
612 * @refresh_required: true if we should get recipe to profile mapping from FW
614 * This function is used to populate all the necessary entries into our
615 * bookkeeping so that we have a current list of all the recipes that are
616 * programmed in the firmware.
618 static enum ice_status
619 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
620 bool *refresh_required)
622 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
623 struct ice_aqc_recipe_data_elem *tmp;
624 u16 num_recps = ICE_MAX_NUM_RECIPES;
625 struct ice_prot_lkup_ext *lkup_exts;
626 u16 i, sub_recps, fv_word_idx = 0;
627 enum ice_status status;
	/* Accumulates result indices from every sub-recipe in the chain */
629 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
631 /* we need a buffer big enough to accommodate all the recipes */
632 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
633 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
635 return ICE_ERR_NO_MEMORY;
	/* ice_aq_get_recipe returns the whole chain rooted at rid */
637 tmp[0].recipe_indx = rid;
638 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
639 /* non-zero status meaning recipe doesn't exist */
643 /* Get recipe to profile map so that we can get the fv from lkups that
644 * we read for a recipe from FW. Since we want to minimize the number of
645 * times we make this FW call, just make one call and cache the copy
646 * until a new recipe is added. This operation is only required the
647 * first time to get the changes from FW. Then to search existing
648 * entries we don't need to update the cache again until another recipe
651 if (*refresh_required) {
652 ice_get_recp_to_prof_map(hw);
653 *refresh_required = false;
656 /* Start populating all the entries for recps[rid] based on lkups from
657 * firmware. Note that we are only creating the root recipe in our
660 lkup_exts = &recps[rid].lkup_exts;
662 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
663 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
664 struct ice_recp_grp_entry *rg_entry;
665 u8 prof, idx, prot = 0;
		/* One rg_list entry is created per sub-recipe of the chain */
669 rg_entry = (struct ice_recp_grp_entry *)
670 ice_malloc(hw, sizeof(*rg_entry));
672 status = ICE_ERR_NO_MEMORY;
676 idx = root_bufs.recipe_indx;
677 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
679 /* Mark all result indices in this chain */
680 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
681 ice_set_bit(root_bufs.content.result_indx &
682 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
684 /* get the first profile that is associated with rid */
685 prof = ice_find_first_bit(recipe_to_profile[idx],
686 ICE_MAX_NUM_PROFILES);
687 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is reserved; entries start at [1] */
688 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
690 rg_entry->fv_idx[i] = lkup_indx;
691 rg_entry->fv_mask[i] =
692 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
694 /* If the recipe is a chained recipe then all its
695 * child recipe's result will have a result index.
696 * To fill fv_words we should not use those result
697 * index, we only need the protocol ids and offsets.
698 * We will skip all the fv_idx which stores result
699 * index in them. We also need to skip any fv_idx which
700 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
701 * valid offset value.
703 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
704 rg_entry->fv_idx[i]) ||
705 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
706 rg_entry->fv_idx[i] == 0)
			/* Translate field-vector index -> (protocol ID, offset) */
709 ice_find_prot_off(hw, ICE_BLK_SW, prof,
710 rg_entry->fv_idx[i], &prot, &off);
711 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
712 lkup_exts->fv_words[fv_word_idx].off = off;
715 /* populate rg_list with the data from the child entry of this
718 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
720 /* Propagate some data to the recipe database */
721 recps[idx].is_root = is_root;
722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
723 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
724 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
725 recps[idx].chain_idx = root_bufs.content.result_indx &
726 ~ICE_AQ_RECIPE_RESULT_EN;
727 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
729 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
735 /* Only do the following for root recipes entries */
736 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
737 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
738 recps[idx].root_rid = root_bufs.content.rid &
739 ~ICE_AQ_RECIPE_ID_IS_ROOT;
740 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
743 /* Complete initialization of the root recipe entry */
744 lkup_exts->n_val_words = fv_word_idx;
745 recps[rid].big_recp = (num_recps > 1);
746 recps[rid].n_grp_count = (u8)num_recps;
	/* Keep a private copy of the raw FW buffers for later re-programming */
747 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
748 ice_memdup(hw, tmp, recps[rid].n_grp_count *
749 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
750 if (!recps[rid].root_buf)
753 /* Copy result indexes */
754 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
755 recps[rid].recp_created = true;
763 * ice_get_recp_to_prof_map - updates recipe to profile mapping
764 * @hw: pointer to hardware structure
766 * This function is used to populate recipe_to_profile matrix where index to
767 * this array is the recipe ID and the element is the mapping of which profiles
768 * is this recipe mapped to.
 *
 * Also rebuilds the reverse (profile_to_recipe) mapping in the same pass.
771 ice_get_recp_to_prof_map(struct ice_hw *hw)
773 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
776 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
779 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
780 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* On AQ failure, skip this profile; its maps stay cleared */
781 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
783 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
784 ICE_MAX_NUM_RECIPES);
		/* Invert: every recipe bit set for profile i also records i */
785 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
786 if (ice_is_bit_set(r_bitmap, j))
787 ice_set_bit(i, recipe_to_profile[j]);
792 * ice_init_def_sw_recp - initialize the recipe book keeping tables
793 * @hw: pointer to the HW struct
794 * @recp_list: pointer to sw recipe list
796 * Allocate memory for the entire recipe table and initialize the structures/
797 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated.
800 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
802 struct ice_sw_recipe *recps;
805 recps = (struct ice_sw_recipe *)
806 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
808 return ICE_ERR_NO_MEMORY;
810 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* Each entry starts out as its own root recipe */
811 recps[i].root_rid = i;
812 INIT_LIST_HEAD(&recps[i].filt_rules);
813 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
814 INIT_LIST_HEAD(&recps[i].rg_list);
815 ice_init_lock(&recps[i].filt_rule_lock);
824 * ice_aq_get_sw_cfg - get switch configuration
825 * @hw: pointer to the hardware structure
826 * @buf: pointer to the result buffer
827 * @buf_size: length of the buffer available for response
828 * @req_desc: pointer to requested descriptor
829 * @num_elems: pointer to number of elements
830 * @cd: pointer to command details structure or NULL
832 * Get switch configuration (0x0200) to be placed in 'buff'.
833 * This admin command returns information such as initial VSI/port number
834 * and switch ID it belongs to.
836 * NOTE: *req_desc is both an input/output parameter.
837 * The caller of this function first calls this function with *request_desc set
838 * to 0. If the response from f/w has *req_desc set to 0, all the switch
839 * configuration information has been returned; if non-zero (meaning not all
840 * the information was returned), the caller should call this function again
841 * with *req_desc set to the previous value returned by f/w to get the
842 * next block of switch configuration information.
844 * *num_elems is output only parameter. This reflects the number of elements
845 * in response buffer. The caller of this function to use *num_elems while
846 * parsing the response buffer.
848 static enum ice_status
849 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
850 u16 buf_size, u16 *req_desc, u16 *num_elems,
851 struct ice_sq_cd *cd)
853 struct ice_aqc_get_sw_cfg *cmd;
854 enum ice_status status;
855 struct ice_aq_desc desc;
857 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
858 cmd = &desc.params.get_sw_conf;
	/* Pass the continuation cookie from the previous call (0 = first) */
859 cmd->element = CPU_TO_LE16(*req_desc);
861 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* FW echoes back the next-block cookie and the element count */
863 *req_desc = LE16_TO_CPU(cmd->element);
864 *num_elems = LE16_TO_CPU(cmd->num_elems);
871 * ice_alloc_sw - allocate resources specific to switch
872 * @hw: pointer to the HW struct
873 * @ena_stats: true to turn on VEB stats
874 * @shared_res: true for shared resource, false for dedicated resource
875 * @sw_id: switch ID returned
876 * @counter_id: VEB counter ID returned
878 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * The VEB counter is only allocated when stats are enabled; on counter
 * allocation failure the already-allocated SWID is NOT rolled back here.
881 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
884 struct ice_aqc_alloc_free_res_elem *sw_buf;
885 struct ice_aqc_res_elem *sw_ele;
886 enum ice_status status;
889 buf_len = sizeof(*sw_buf);
890 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
891 ice_malloc(hw, buf_len);
893 return ICE_ERR_NO_MEMORY;
895 /* Prepare buffer for switch ID.
896 * The number of resource entries in buffer is passed as 1 since only a
897 * single switch/VEB instance is allocated, and hence a single sw_id
900 sw_buf->num_elems = CPU_TO_LE16(1);
902 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
903 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
904 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
906 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
907 ice_aqc_opc_alloc_res, NULL);
910 goto ice_alloc_sw_exit;
912 sw_ele = &sw_buf->elem[0];
913 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
916 /* Prepare buffer for VEB Counter */
917 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
918 struct ice_aqc_alloc_free_res_elem *counter_buf;
919 struct ice_aqc_res_elem *counter_ele;
921 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
922 ice_malloc(hw, buf_len);
924 status = ICE_ERR_NO_MEMORY;
925 goto ice_alloc_sw_exit;
928 /* The number of resource entries in buffer is passed as 1 since
929 * only a single switch/VEB instance is allocated, and hence a
930 * single VEB counter is requested.
932 counter_buf->num_elems = CPU_TO_LE16(1);
933 counter_buf->res_type =
934 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
935 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
936 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
940 ice_free(hw, counter_buf);
941 goto ice_alloc_sw_exit;
943 counter_ele = &counter_buf->elem[0];
944 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
945 ice_free(hw, counter_buf);
949 ice_free(hw, sw_buf);
954 * ice_free_sw - free resources specific to switch
955 * @hw: pointer to the HW struct
956 * @sw_id: switch ID returned
957 * @counter_id: VEB counter ID returned
959 * free switch resources (SWID and VEB counter) (0x0209)
961 * NOTE: This function frees multiple resources. It continues
962 * releasing other resources even after it encounters error.
963 * The error code returned is the last error it encountered.
965 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
967 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
968 enum ice_status status, ret_status;
971 buf_len = sizeof(*sw_buf);
972 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
973 ice_malloc(hw, buf_len);
975 return ICE_ERR_NO_MEMORY;
977 /* Prepare buffer to free for switch ID res.
978 * The number of resource entries in buffer is passed as 1 since only a
979 * single switch/VEB instance is freed, and hence a single sw_id
982 sw_buf->num_elems = CPU_TO_LE16(1);
983 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
984 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
986 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
987 ice_aqc_opc_free_res, NULL);
990 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
992 /* Prepare buffer to free for VEB Counter resource */
993 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
994 ice_malloc(hw, buf_len);
996 ice_free(hw, sw_buf);
997 return ICE_ERR_NO_MEMORY;
1000 /* The number of resource entries in buffer is passed as 1 since only a
1001 * single switch/VEB instance is freed, and hence a single VEB counter
1004 counter_buf->num_elems = CPU_TO_LE16(1);
1005 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1006 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1008 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1009 ice_aqc_opc_free_res, NULL);
	/* Counter-free failure overrides the SWID-free status (last error wins) */
1011 ice_debug(hw, ICE_DBG_SW,
1012 "VEB counter resource could not be freed\n");
1013 ret_status = status;
1016 ice_free(hw, counter_buf);
1017 ice_free(hw, sw_buf);
1023 * @hw: pointer to the HW struct
1024 * @vsi_ctx: pointer to a VSI context struct
1025 * @cd: pointer to command details structure or NULL
1027 * Add a VSI context to the hardware (0x0210)
 *
 * On success the FW-assigned VSI number and the used/free VSI counts are
 * written back into @vsi_ctx.
1030 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1031 struct ice_sq_cd *cd)
1033 struct ice_aqc_add_update_free_vsi_resp *res;
1034 struct ice_aqc_add_get_update_free_vsi *cmd;
1035 struct ice_aq_desc desc;
1036 enum ice_status status;
1038 cmd = &desc.params.vsi_cmd;
1039 res = &desc.params.add_update_free_vsi_res;
1041 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
	/* Caller-supplied VSI number is only honored when not pool-allocated */
1043 if (!vsi_ctx->alloc_from_pool)
1044 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1045 ICE_AQ_VSI_IS_VALID);
1047 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
	/* RD flag: command carries a read/write data buffer (the VSI info) */
1049 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1051 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1052 sizeof(vsi_ctx->info), cd);
1055 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1056 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1057 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1065 * @hw: pointer to the HW struct
1066 * @vsi_ctx: pointer to a VSI context struct
1067 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1068 * @cd: pointer to command details structure or NULL
1070 * Free VSI context info from hardware (0x0213)
 *
 * On success the used/free VSI counts reported by FW are written back
 * into @vsi_ctx.
1073 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1074 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1076 struct ice_aqc_add_update_free_vsi_resp *resp;
1077 struct ice_aqc_add_get_update_free_vsi *cmd;
1078 struct ice_aq_desc desc;
1079 enum ice_status status;
1081 cmd = &desc.params.vsi_cmd;
1082 resp = &desc.params.add_update_free_vsi_res;
1084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1086 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* KEEP_ALLOC flag set conditionally on keep_vsi_alloc (guard not shown) */
1088 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1090 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1092 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1093 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1101 * @hw: pointer to the HW struct
1102 * @vsi_ctx: pointer to a VSI context struct
1103 * @cd: pointer to command details structure or NULL
1105 * Update VSI context in the hardware (0x0211)
 *
 * Sends the current vsi_ctx->info as the command buffer; on success the
 * used/free VSI counts are written back into @vsi_ctx.
1108 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1109 struct ice_sq_cd *cd)
1111 struct ice_aqc_add_update_free_vsi_resp *resp;
1112 struct ice_aqc_add_get_update_free_vsi *cmd;
1113 struct ice_aq_desc desc;
1114 enum ice_status status;
1116 cmd = &desc.params.vsi_cmd;
1117 resp = &desc.params.add_update_free_vsi_res;
1119 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1121 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1123 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1125 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1126 sizeof(vsi_ctx->info), cd);
1129 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1130 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1137 * ice_is_vsi_valid - check whether the VSI is valid or not
1138 * @hw: pointer to the HW struct
1139 * @vsi_handle: VSI handle
1141 * check whether the VSI is valid or not
 * Returns true when the handle is in range AND a context has been saved for it.
1143 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1145 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1149 * ice_get_hw_vsi_num - return the HW VSI number
1150 * @hw: pointer to the HW struct
1151 * @vsi_handle: VSI handle
1153 * return the HW VSI number
1154 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 * (no bounds/NULL check is performed here)
1156 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1158 return hw->vsi_ctx[vsi_handle]->vsi_num;
1162 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1163 * @hw: pointer to the HW struct
1164 * @vsi_handle: VSI handle
1166 * return the VSI context entry for a given VSI handle
1168 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/* NULL for out-of-range handles; may also be NULL if no context saved */
1170 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1174 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1175 * @hw: pointer to the HW struct
1176 * @vsi_handle: VSI handle
1177 * @vsi: VSI context pointer
1179 * save the VSI context entry for a given VSI handle
1182 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
/* stores the pointer only; ownership of @vsi transfers to the table */
1184 hw->vsi_ctx[vsi_handle] = vsi;
1188 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1189 * @hw: pointer to the HW struct
1190 * @vsi_handle: VSI handle
1192 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1194 struct ice_vsi_ctx *vsi;
1197 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* free the per-TC LAN queue context arrays */
1200 ice_for_each_traffic_class(i) {
1201 if (vsi->lan_q_ctx[i]) {
1202 ice_free(hw, vsi->lan_q_ctx[i]);
/* NULL the slot so a later clear cannot double-free */
1203 vsi->lan_q_ctx[i] = NULL;
1209 * ice_clear_vsi_ctx - clear the VSI context entry
1210 * @hw: pointer to the HW struct
1211 * @vsi_handle: VSI handle
1213 * clear the VSI context entry
1215 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1217 struct ice_vsi_ctx *vsi;
1219 vsi = ice_get_vsi_ctx(hw, vsi_handle);
/* release queue contexts first, then drop the context entry itself */
1221 ice_clear_vsi_q_ctx(hw, vsi_handle);
1223 hw->vsi_ctx[vsi_handle] = NULL;
1228 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1229 * @hw: pointer to the HW struct
1231 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/* walk every possible handle; ice_clear_vsi_ctx handles empty slots */
1235 for (i = 0; i < ICE_MAX_VSI; i++)
1236 ice_clear_vsi_ctx(hw, i);
1240 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1241 * @hw: pointer to the HW struct
1242 * @vsi_handle: unique VSI handle provided by drivers
1243 * @vsi_ctx: pointer to a VSI context struct
1244 * @cd: pointer to command details structure or NULL
1246 * Add a VSI context to the hardware also add it into the VSI handle list.
1247 * If this function gets called after reset for existing VSIs then update
1248 * with the new HW VSI number in the corresponding VSI handle list entry.
1251 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1252 struct ice_sq_cd *cd)
1254 struct ice_vsi_ctx *tmp_vsi_ctx;
1255 enum ice_status status;
1257 if (vsi_handle >= ICE_MAX_VSI)
1258 return ICE_ERR_PARAM;
/* program the VSI in firmware first; only save locally on success */
1259 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1262 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1264 /* Create a new VSI context */
1265 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1266 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
/* allocation failed: roll back the firmware-side add before erroring */
1268 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1269 return ICE_ERR_NO_MEMORY;
/* keep a private copy; caller's vsi_ctx may go out of scope */
1271 *tmp_vsi_ctx = *vsi_ctx;
1273 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
/* existing entry (post-reset re-add): refresh its HW VSI number */
1275 /* update with new HW VSI num */
1276 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1283 * ice_free_vsi- free VSI context from hardware and VSI handle list
1284 * @hw: pointer to the HW struct
1285 * @vsi_handle: unique VSI handle
1286 * @vsi_ctx: pointer to a VSI context struct
1287 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1288 * @cd: pointer to command details structure or NULL
1290 * Free VSI context info from hardware as well as from VSI handle list
1293 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1294 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1296 enum ice_status status;
1298 if (!ice_is_vsi_valid(hw, vsi_handle))
1299 return ICE_ERR_PARAM;
/* translate the driver handle to the firmware VSI number before freeing */
1300 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1301 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
/* drop the local context entry to match the hardware state */
1303 ice_clear_vsi_ctx(hw, vsi_handle);
1309 * @hw: pointer to the HW struct
1310 * @vsi_handle: unique VSI handle
1311 * @vsi_ctx: pointer to a VSI context struct
1312 * @cd: pointer to command details structure or NULL
1314 * Update VSI context in the hardware
1317 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1318 struct ice_sq_cd *cd)
1320 if (!ice_is_vsi_valid(hw, vsi_handle))
1321 return ICE_ERR_PARAM;
/* resolve handle -> firmware VSI number, then issue the AQ update */
1322 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1323 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1327 * ice_aq_get_vsi_params
1328 * @hw: pointer to the HW struct
1329 * @vsi_ctx: pointer to a VSI context struct
1330 * @cd: pointer to command details structure or NULL
1332 * Get VSI context info from hardware (0x0212)
1335 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1336 struct ice_sq_cd *cd)
1338 struct ice_aqc_add_get_update_free_vsi *cmd;
1339 struct ice_aqc_get_vsi_resp *resp;
1340 struct ice_aq_desc desc;
1341 enum ice_status status;
/* cmd and resp alias the same descriptor parameter union */
1343 cmd = &desc.params.vsi_cmd;
1344 resp = &desc.params.get_vsi_resp;
1346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1348 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* firmware fills vsi_ctx->info with the current VSI context */
1350 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1351 sizeof(vsi_ctx->info), cd);
/* strip flag bits from the echoed VSI number */
1353 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1355 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1356 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1363 * ice_aq_add_update_mir_rule - add/update a mirror rule
1364 * @hw: pointer to the HW struct
1365 * @rule_type: Rule Type
1366 * @dest_vsi: VSI number to which packets will be mirrored
1367 * @count: length of the list
1368 * @mr_buf: buffer for list of mirrored VSI numbers
1369 * @cd: pointer to command details structure or NULL
 * @rule_id: in: rule ID to update (or ICE_INVAL_MIRROR_RULE_ID to add);
 *           out: rule ID returned by firmware
1372 * Add/Update Mirror Rule (0x260).
1375 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1376 u16 count, struct ice_mir_rule_buf *mr_buf,
1377 struct ice_sq_cd *cd, u16 *rule_id)
1379 struct ice_aqc_add_update_mir_rule *cmd;
1380 struct ice_aq_desc desc;
1381 enum ice_status status;
1382 __le16 *mr_list = NULL;
/* validate argument combinations per rule type before touching firmware */
1385 switch (rule_type) {
1386 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1387 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1388 /* Make sure count and mr_buf are set for these rule_types */
1389 if (!(count && mr_buf))
1390 return ICE_ERR_PARAM;
1392 buf_size = count * sizeof(__le16);
1393 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1395 return ICE_ERR_NO_MEMORY;
/* physical-port rules mirror the whole port: no VSI list allowed */
1397 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1398 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1399 /* Make sure count and mr_buf are not set for these
1402 if (count || mr_buf)
1403 return ICE_ERR_PARAM;
1406 ice_debug(hw, ICE_DBG_SW,
1407 "Error due to unsupported rule_type %u\n", rule_type);
1408 return ICE_ERR_OUT_OF_RANGE;
1411 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1413 /* Pre-process 'mr_buf' items for add/update of virtual port
1414 * ingress/egress mirroring (but not physical port ingress/egress
1420 for (i = 0; i < count; i++) {
1423 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1425 /* Validate specified VSI number, make sure it is less
1426 * than ICE_MAX_VSI, if not return with error.
1428 if (id >= ICE_MAX_VSI) {
1429 ice_debug(hw, ICE_DBG_SW,
1430 "Error VSI index (%u) out-of-range\n",
/* free the list on the error path — no leak on bad input */
1432 ice_free(hw, mr_list);
1433 return ICE_ERR_OUT_OF_RANGE;
1436 /* add VSI to mirror rule */
1439 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1440 else /* remove VSI from mirror rule */
1441 mr_list[i] = CPU_TO_LE16(id);
1445 cmd = &desc.params.add_update_rule;
/* a valid incoming rule_id means "update existing rule" */
1446 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1447 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1448 ICE_AQC_RULE_ID_VALID_M);
1449 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1450 cmd->num_entries = CPU_TO_LE16(count);
1451 cmd->dest = CPU_TO_LE16(dest_vsi);
1453 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
/* return the rule ID firmware assigned (or confirmed) */
1455 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1457 ice_free(hw, mr_list);
1463 * ice_aq_delete_mir_rule - delete a mirror rule
1464 * @hw: pointer to the HW struct
1465 * @rule_id: Mirror rule ID (to be deleted)
1466 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1467 * otherwise it is returned to the shared pool
1468 * @cd: pointer to command details structure or NULL
1470 * Delete Mirror Rule (0x261).
1473 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1474 struct ice_sq_cd *cd)
1476 struct ice_aqc_delete_mir_rule *cmd;
1477 struct ice_aq_desc desc;
1479 /* rule_id should be in the range 0...63 */
1480 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1481 return ICE_ERR_OUT_OF_RANGE;
1483 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1485 cmd = &desc.params.del_rule;
/* firmware requires the valid bit set alongside the rule ID */
1486 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1487 cmd->rule_id = CPU_TO_LE16(rule_id);
1490 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1492 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1496 * ice_aq_alloc_free_vsi_list
1497 * @hw: pointer to the HW struct
1498 * @vsi_list_id: VSI list ID returned or used for lookup
1499 * @lkup_type: switch rule filter lookup type
1500 * @opc: switch rules population command type - pass in the command opcode
1502 * allocates or free a VSI list resource
1504 static enum ice_status
1505 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1506 enum ice_sw_lkup_type lkup_type,
1507 enum ice_adminq_opc opc)
1509 struct ice_aqc_alloc_free_res_elem *sw_buf;
1510 struct ice_aqc_res_elem *vsi_ele;
1511 enum ice_status status;
1514 buf_len = sizeof(*sw_buf);
1515 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1516 ice_malloc(hw, buf_len);
1518 return ICE_ERR_NO_MEMORY;
/* single resource element per request */
1519 sw_buf->num_elems = CPU_TO_LE16(1);
/* lookup type selects the resource pool: replication vs. VLAN pruning */
1521 if (lkup_type == ICE_SW_LKUP_MAC ||
1522 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1523 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1524 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1525 lkup_type == ICE_SW_LKUP_PROMISC ||
1526 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1527 lkup_type == ICE_SW_LKUP_LAST) {
1528 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1529 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1531 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1533 status = ICE_ERR_PARAM;
1534 goto ice_aq_alloc_free_vsi_list_exit;
/* on free, tell firmware which list ID to release */
1537 if (opc == ice_aqc_opc_free_res)
1538 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1540 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1542 goto ice_aq_alloc_free_vsi_list_exit;
/* on alloc, return the list ID firmware handed back */
1544 if (opc == ice_aqc_opc_alloc_res) {
1545 vsi_ele = &sw_buf->elem[0];
1546 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1549 ice_aq_alloc_free_vsi_list_exit:
1550 ice_free(hw, sw_buf);
1555 * ice_aq_set_storm_ctrl - Sets storm control configuration
1556 * @hw: pointer to the HW struct
1557 * @bcast_thresh: represents the upper threshold for broadcast storm control
1558 * @mcast_thresh: represents the upper threshold for multicast storm control
1559 * @ctl_bitmask: storm control control knobs
1561 * Sets the storm control configuration (0x0280)
1564 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1567 struct ice_aqc_storm_cfg *cmd;
1568 struct ice_aq_desc desc;
1570 cmd = &desc.params.storm_conf;
1572 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
/* thresholds are masked to the field width firmware accepts */
1574 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1575 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1576 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1578 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1582 * ice_aq_get_storm_ctrl - gets storm control configuration
1583 * @hw: pointer to the HW struct
1584 * @bcast_thresh: represents the upper threshold for broadcast storm control
1585 * @mcast_thresh: represents the upper threshold for multicast storm control
1586 * @ctl_bitmask: storm control control knobs
1588 * Gets the storm control configuration (0x0281)
1591 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1594 enum ice_status status;
1595 struct ice_aq_desc desc;
1597 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1599 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* firmware returns the settings in the descriptor itself */
1601 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
/* each output pointer is optional — only filled if non-NULL */
1604 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1607 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1610 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1617 * ice_aq_sw_rules - add/update/remove switch rules
1618 * @hw: pointer to the HW struct
1619 * @rule_list: pointer to switch rule population list
1620 * @rule_list_sz: total size of the rule list in bytes
1621 * @num_rules: number of switch rules in the rule_list
1622 * @opc: switch rules population command type - pass in the command opcode
1623 * @cd: pointer to command details structure or NULL
1625 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1627 static enum ice_status
1628 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1629 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1631 struct ice_aq_desc desc;
1633 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* only the three switch-rule opcodes are valid here */
1635 if (opc != ice_aqc_opc_add_sw_rules &&
1636 opc != ice_aqc_opc_update_sw_rules &&
1637 opc != ice_aqc_opc_remove_sw_rules)
1638 return ICE_ERR_PARAM;
1640 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the rule buffer carries data to firmware */
1642 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1643 desc.params.sw_rules.num_rules_fltr_entry_index =
1644 CPU_TO_LE16(num_rules);
1645 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1649 * ice_aq_add_recipe - add switch recipe
1650 * @hw: pointer to the HW struct
1651 * @s_recipe_list: pointer to switch rule population list
1652 * @num_recipes: number of switch recipes in the list
1653 * @cd: pointer to command details structure or NULL
1658 ice_aq_add_recipe(struct ice_hw *hw,
1659 struct ice_aqc_recipe_data_elem *s_recipe_list,
1660 u16 num_recipes, struct ice_sq_cd *cd)
1662 struct ice_aqc_add_get_recipe *cmd;
1663 struct ice_aq_desc desc;
1666 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1667 cmd = &desc.params.add_get_recipe;
1668 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1670 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: the recipe list buffer carries data to firmware */
1671 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1673 buf_size = num_recipes * sizeof(*s_recipe_list);
1675 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1679 * ice_aq_get_recipe - get switch recipe
1680 * @hw: pointer to the HW struct
1681 * @s_recipe_list: pointer to switch rule population list
1682 * @num_recipes: pointer to the number of recipes (input and output)
1683 * @recipe_root: root recipe number of recipe(s) to retrieve
1684 * @cd: pointer to command details structure or NULL
1688 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1689 * On output, *num_recipes will equal the number of entries returned in
1692 * The caller must supply enough space in s_recipe_list to hold all possible
1693 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1696 ice_aq_get_recipe(struct ice_hw *hw,
1697 struct ice_aqc_recipe_data_elem *s_recipe_list,
1698 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1700 struct ice_aqc_add_get_recipe *cmd;
1701 struct ice_aq_desc desc;
1702 enum ice_status status;
/* enforce the full-size buffer contract stated above */
1705 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1706 return ICE_ERR_PARAM;
1708 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1709 cmd = &desc.params.add_get_recipe;
1710 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1712 cmd->return_index = CPU_TO_LE16(recipe_root);
1713 cmd->num_sub_recipes = 0;
1715 buf_size = *num_recipes * sizeof(*s_recipe_list);
1717 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* firmware writes the actual recipe count into the descriptor */
1718 /* cppcheck-suppress constArgument */
1719 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1725 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1726 * @hw: pointer to the HW struct
1727 * @profile_id: package profile ID to associate the recipe with
1728 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1729 * @cd: pointer to command details structure or NULL
1730 * Recipe to profile association (0x0291)
1733 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1734 struct ice_sq_cd *cd)
1736 struct ice_aqc_recipe_to_profile *cmd;
1737 struct ice_aq_desc desc;
1739 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1740 cmd = &desc.params.recipe_to_profile;
1741 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* note: profile_id is u32 at the API but truncated to 16 bits on the wire */
1742 cmd->profile_id = CPU_TO_LE16(profile_id);
1743 /* Set the recipe ID bit in the bitmask to let the device know which
1744 * profile we are associating the recipe to
1746 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1747 ICE_NONDMA_TO_NONDMA);
1749 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1753 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1754 * @hw: pointer to the HW struct
1755 * @profile_id: package profile ID to associate the recipe with
1756 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1757 * @cd: pointer to command details structure or NULL
1758 * Associate profile ID with given recipe (0x0293)
1761 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1762 struct ice_sq_cd *cd)
1764 struct ice_aqc_recipe_to_profile *cmd;
1765 struct ice_aq_desc desc;
1766 enum ice_status status;
1768 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1769 cmd = &desc.params.recipe_to_profile;
1770 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1771 cmd->profile_id = CPU_TO_LE16(profile_id);
1773 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* copy the recipe association bitmap firmware returned to the caller */
1775 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1776 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1782 * ice_alloc_recipe - add recipe resource
1783 * @hw: pointer to the hardware structure
1784 * @rid: recipe ID returned as response to AQ call
1786 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1788 struct ice_aqc_alloc_free_res_elem *sw_buf;
1789 enum ice_status status;
1792 buf_len = sizeof(*sw_buf);
1793 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1795 return ICE_ERR_NO_MEMORY;
1797 sw_buf->num_elems = CPU_TO_LE16(1);
/* request one shared recipe resource from the global pool */
1798 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1799 ICE_AQC_RES_TYPE_S) |
1800 ICE_AQC_RES_TYPE_FLAG_SHARED);
1801 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802 ice_aqc_opc_alloc_res, NULL);
/* firmware returns the allocated recipe ID in the element response */
1804 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1805 ice_free(hw, sw_buf);
1810 /* ice_init_port_info - Initialize port_info with switch configuration data
1811 * @pi: pointer to port_info
1812 * @vsi_port_num: VSI number or port number
1813 * @type: Type of switch element (port or VSI)
1814 * @swid: switch ID of the switch the element is attached to
1815 * @pf_vf_num: PF or VF number
1816 * @is_vf: true if the element is a VF, false otherwise
1819 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1820 u16 swid, u16 pf_vf_num, bool is_vf)
1823 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1824 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1826 pi->pf_vf_num = pf_vf_num;
/* no default Tx/Rx VSI configured yet */
1828 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1829 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1832 ice_debug(pi->hw, ICE_DBG_SW,
1833 "incorrect VSI/port type received\n");
1838 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1839 * @hw: pointer to the hardware structure
1841 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1843 struct ice_aqc_get_sw_cfg_resp *rbuf;
1844 enum ice_status status;
1845 u16 num_total_ports;
/* this function currently supports a single port per PF */
1851 num_total_ports = 1;
1853 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1854 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1857 return ICE_ERR_NO_MEMORY;
1859 /* Multiple calls to ice_aq_get_sw_cfg may be required
1860 * to get all the switch configuration information. The need
1861 * for additional calls is indicated by ice_aq_get_sw_cfg
1862 * writing a non-zero value in req_desc
1865 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1866 &req_desc, &num_elems, NULL);
1871 for (i = 0; i < num_elems; i++) {
1872 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1873 u16 pf_vf_num, swid, vsi_port_num;
1877 ele = rbuf[i].elements;
/* decode the packed response element fields */
1878 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1879 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1881 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1882 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1884 swid = LE16_TO_CPU(ele->swid);
1886 if (LE16_TO_CPU(ele->pf_vf_num) &
1887 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
/* element type lives in the upper bits of vsi_port_num */
1890 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1891 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1894 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1895 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1896 if (j == num_total_ports) {
1897 ice_debug(hw, ICE_DBG_SW,
1898 "more ports than expected\n");
1899 status = ICE_ERR_CFG;
1902 ice_init_port_info(hw->port_info,
1903 vsi_port_num, res_type, swid,
/* loop until firmware stops setting req_desc (continuation token) */
1911 } while (req_desc && !status);
1914 ice_free(hw, (void *)rbuf);
1919 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1920 * @hw: pointer to the hardware structure
1921 * @fi: filter info structure to fill/update
1923 * This helper function populates the lb_en and lan_en elements of the provided
1924 * ice_fltr_info struct using the switch's type and characteristics of the
1925 * switch rule being configured.
1927 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* loopback/LAN enables only matter for Tx-direction forwarding actions */
1931 if ((fi->flag & ICE_FLTR_TX) &&
1932 (fi->fltr_act == ICE_FWD_TO_VSI ||
1933 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1934 fi->fltr_act == ICE_FWD_TO_Q ||
1935 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1936 /* Setting LB for prune actions will result in replicated
1937 * packets to the internal switch that will be dropped.
1939 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1942 /* Set lan_en to TRUE if
1943 * 1. The switch is a VEB AND
1945 * 2.1 The lookup is a directional lookup like ethertype,
1946 * promiscuous, ethertype-MAC, promiscuous-VLAN
1947 * and default-port OR
1948 * 2.2 The lookup is VLAN, OR
1949 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1950 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1954 * The switch is a VEPA.
1956 * In all other cases, the LAN enable has to be set to false.
1959 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1960 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1961 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1962 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1963 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1964 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1965 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1966 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1967 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1968 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1977 * ice_fill_sw_rule - Helper function to fill switch rule structure
1978 * @hw: pointer to the hardware structure
1979 * @f_info: entry containing packet forwarding information
1980 * @s_rule: switch rule structure to be filled in based on mac_entry
1981 * @opc: switch rules population command type - pass in the command opcode
1984 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1985 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN programmed" */
1987 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* removal only needs the rule index — no header or action required */
1995 if (opc == ice_aqc_opc_remove_sw_rules) {
1996 s_rule->pdata.lkup_tx_rx.act = 0;
1997 s_rule->pdata.lkup_tx_rx.index =
1998 CPU_TO_LE16(f_info->fltr_rule_id);
1999 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2003 eth_hdr_sz = sizeof(dummy_eth_header);
2004 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2006 /* initialize the ether header with a dummy header */
2007 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2008 ice_fill_sw_info(hw, f_info);
/* translate the abstract filter action into single-action bits */
2010 switch (f_info->fltr_act) {
2011 case ICE_FWD_TO_VSI:
2012 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2013 ICE_SINGLE_ACT_VSI_ID_M;
2014 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2015 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2016 ICE_SINGLE_ACT_VALID_BIT;
2018 case ICE_FWD_TO_VSI_LIST:
2019 act |= ICE_SINGLE_ACT_VSI_LIST;
2020 act |= (f_info->fwd_id.vsi_list_id <<
2021 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2022 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2023 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2024 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2025 ICE_SINGLE_ACT_VALID_BIT;
2028 act |= ICE_SINGLE_ACT_TO_Q;
2029 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2030 ICE_SINGLE_ACT_Q_INDEX_M;
2032 case ICE_DROP_PACKET:
2033 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2034 ICE_SINGLE_ACT_VALID_BIT;
2036 case ICE_FWD_TO_QGRP:
/* queue-group size is encoded as a power-of-two region exponent */
2037 q_rgn = f_info->qgrp_size > 0 ?
2038 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2039 act |= ICE_SINGLE_ACT_TO_Q;
2040 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2041 ICE_SINGLE_ACT_Q_INDEX_M;
2042 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2043 ICE_SINGLE_ACT_Q_REGION_M;
2050 act |= ICE_SINGLE_ACT_LB_ENABLE;
2052 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* pick the header fields to match on, per lookup type */
2054 switch (f_info->lkup_type) {
2055 case ICE_SW_LKUP_MAC:
2056 daddr = f_info->l_data.mac.mac_addr;
2058 case ICE_SW_LKUP_VLAN:
2059 vlan_id = f_info->l_data.vlan.vlan_id;
2060 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2061 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2062 act |= ICE_SINGLE_ACT_PRUNE;
2063 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2066 case ICE_SW_LKUP_ETHERTYPE_MAC:
2067 daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fallthrough to also program the ethertype field */
2069 case ICE_SW_LKUP_ETHERTYPE:
2070 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2071 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2073 case ICE_SW_LKUP_MAC_VLAN:
2074 daddr = f_info->l_data.mac_vlan.mac_addr;
2075 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2077 case ICE_SW_LKUP_PROMISC_VLAN:
2078 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2080 case ICE_SW_LKUP_PROMISC:
2081 daddr = f_info->l_data.mac_vlan.mac_addr;
2087 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2088 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2089 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2091 /* Recipe set depending on lookup type */
2092 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2093 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2094 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* program the destination MAC into the dummy header, if one was chosen */
2097 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2098 ICE_NONDMA_TO_NONDMA);
/* vlan_id still at sentinel means no VLAN match was requested */
2100 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2101 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2102 *off = CPU_TO_BE16(vlan_id);
2105 /* Create the switch rule with the final dummy Ethernet header */
2106 if (opc != ice_aqc_opc_update_sw_rules)
2107 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2111 * ice_add_marker_act
2112 * @hw: pointer to the hardware structure
2113 * @m_ent: the management entry for which sw marker needs to be added
2114 * @sw_marker: sw marker to tag the Rx descriptor with
2115 * @l_id: large action resource ID
2117 * Create a large action to hold software marker and update the switch rule
2118 * entry pointed by m_ent with newly created large action
2120 static enum ice_status
2121 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2122 u16 sw_marker, u16 l_id)
2124 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2125 /* For software marker we need 3 large actions
2126 * 1. FWD action: FWD TO VSI or VSI LIST
2127 * 2. GENERIC VALUE action to hold the profile ID
2128 * 3. GENERIC VALUE action to hold the software marker ID
2130 const u16 num_lg_acts = 3;
2131 enum ice_status status;
/* markers are only supported on MAC lookup rules */
2137 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2138 return ICE_ERR_PARAM;
2140 /* Create two back-to-back switch rules and submit them to the HW using
2141 * one memory buffer:
2145 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2146 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2147 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2149 return ICE_ERR_NO_MEMORY;
/* second rule lives directly after the large action in the same buffer */
2151 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2153 /* Fill in the first switch rule i.e. large action */
2154 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2155 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2156 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2158 /* First action VSI forwarding or VSI list forwarding depending on how
2161 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2162 m_ent->fltr_info.fwd_id.hw_vsi_id;
2164 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2165 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2166 ICE_LG_ACT_VSI_LIST_ID_M;
2167 if (m_ent->vsi_count > 1)
2168 act |= ICE_LG_ACT_VSI_LIST;
2169 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2171 /* Second action descriptor type */
2172 act = ICE_LG_ACT_GENERIC;
2174 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2175 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2177 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2178 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2180 /* Third action Marker value */
2181 act |= ICE_LG_ACT_GENERIC;
2182 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2183 ICE_LG_ACT_GENERIC_VALUE_M;
2185 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2187 /* call the fill switch rule to fill the lookup Tx Rx structure */
2188 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2189 ice_aqc_opc_update_sw_rules);
2191 /* Update the action to point to the large action ID */
2192 rx_tx->pdata.lkup_tx_rx.act =
2193 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2194 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2195 ICE_SINGLE_ACT_PTR_VAL_M));
2197 /* Use the filter rule ID of the previously created rule with single
2198 * act. Once the update happens, hardware will treat this as large
2201 rx_tx->pdata.lkup_tx_rx.index =
2202 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* submit both rules in one AQ call */
2204 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2205 ice_aqc_opc_update_sw_rules, NULL);
/* remember the large-action binding on the management entry */
2207 m_ent->lg_act_idx = l_id;
2208 m_ent->sw_marker_id = sw_marker;
2211 ice_free(hw, lg_act);
2216 * ice_add_counter_act - add/update filter rule with counter action
2217 * @hw: pointer to the hardware structure
2218 * @m_ent: the management entry for which counter needs to be added
2219 * @counter_id: VLAN counter ID returned as part of allocate resource
2220 * @l_id: large action resource ID
2222 static enum ice_status
2223 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2224 u16 counter_id, u16 l_id)
2226 struct ice_aqc_sw_rules_elem *lg_act;
2227 struct ice_aqc_sw_rules_elem *rx_tx;
2228 enum ice_status status;
2229 /* 2 actions will be added while adding a large action counter */
2230 const int num_acts = 2;
/* counters are only supported on MAC lookup rules */
2237 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2238 return ICE_ERR_PARAM;
2240 /* Create two back-to-back switch rules and submit them to the HW using
2241 * one memory buffer:
2245 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2246 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2247 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2250 return ICE_ERR_NO_MEMORY;
/* second rule lives directly after the large action in the same buffer */
2252 rx_tx = (struct ice_aqc_sw_rules_elem *)
2253 ((u8 *)lg_act + lg_act_size);
2255 /* Fill in the first switch rule i.e. large action */
2256 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2257 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2258 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2260 /* First action VSI forwarding or VSI list forwarding depending on how
2263 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2264 m_ent->fltr_info.fwd_id.hw_vsi_id;
2266 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2267 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2268 ICE_LG_ACT_VSI_LIST_ID_M;
2269 if (m_ent->vsi_count > 1)
2270 act |= ICE_LG_ACT_VSI_LIST;
2271 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2273 /* Second action counter ID */
2274 act = ICE_LG_ACT_STAT_COUNT;
2275 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2276 ICE_LG_ACT_STAT_COUNT_M;
2277 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2279 /* call the fill switch rule to fill the lookup Tx Rx structure */
2280 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2281 ice_aqc_opc_update_sw_rules);
/* point the lookup rule at the large action just built */
2283 act = ICE_SINGLE_ACT_PTR;
2284 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2285 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2287 /* Use the filter rule ID of the previously created rule with single
2288 * act. Once the update happens, hardware will treat this as large
2291 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2292 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* submit both rules in one AQ call */
2294 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2295 ice_aqc_opc_update_sw_rules, NULL);
/* remember the counter binding on the management entry */
2297 m_ent->lg_act_idx = l_id;
2298 m_ent->counter_index = counter_id;
2301 ice_free(hw, lg_act);
2306 * ice_create_vsi_list_map
2307 * @hw: pointer to the hardware structure
2308 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2309 * @num_vsi: number of VSI handles in the array
2310 * @vsi_list_id: VSI list ID generated as part of allocate resource
2312 * Helper function to create a new entry of VSI list ID to VSI mapping
2313 * using the given VSI list ID
/* Returns the new map entry; NULL-on-allocation-failure handling is
 * presumed but elided in this extraction — TODO confirm against full file.
 */
2315 static struct ice_vsi_list_map_info *
2316 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2319 struct ice_switch_info *sw = hw->switch_info;
2320 struct ice_vsi_list_map_info *v_map;
/* Zero-initialized allocation so the vsi_map bitmap starts empty */
2323 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2328 v_map->vsi_list_id = vsi_list_id;
/* Record each VSI handle in the map's bitmap */
2330 for (i = 0; i < num_vsi; i++)
2331 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide VSI-list map list */
2333 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2338 * ice_update_vsi_list_rule
2339 * @hw: pointer to the hardware structure
2340 * @vsi_handle_arr: array of VSI handles to form a VSI list
2341 * @num_vsi: number of VSI handles in the array
2342 * @vsi_list_id: VSI list ID generated as part of allocate resource
2343 * @remove: Boolean value to indicate if this is a remove action
2344 * @opc: switch rules population command type - pass in the command opcode
2345 * @lkup_type: lookup type of the filter
2347 * Call AQ command to add a new switch rule or update existing switch rule
2348 * using the given VSI list ID
2350 static enum ice_status
2351 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2352 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2353 enum ice_sw_lkup_type lkup_type)
2355 struct ice_aqc_sw_rules_elem *s_rule;
2356 enum ice_status status;
2362 return ICE_ERR_PARAM;
/* Map the lookup type to a rule type: most lookups use a VSI list
 * set/clear rule; VLAN lookups use a prune list set/clear rule.
 */
2364 if (lkup_type == ICE_SW_LKUP_MAC ||
2365 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2366 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2367 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2368 lkup_type == ICE_SW_LKUP_PROMISC ||
2369 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2370 lkup_type == ICE_SW_LKUP_LAST)
2371 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2372 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2373 else if (lkup_type == ICE_SW_LKUP_VLAN)
2374 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2375 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2377 return ICE_ERR_PARAM;
/* Rule buffer size scales with the number of VSIs in the list */
2379 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2380 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2382 return ICE_ERR_NO_MEMORY;
2383 for (i = 0; i < num_vsi; i++) {
2384 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2385 status = ICE_ERR_PARAM;
2388 /* AQ call requires hw_vsi_id(s) */
2389 s_rule->pdata.vsi_list.vsi[i] =
2390 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2393 s_rule->type = CPU_TO_LE16(rule_type);
2394 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2395 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2397 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2400 ice_free(hw, s_rule);
2405 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2406 * @hw: pointer to the HW struct
2407 * @vsi_handle_arr: array of VSI handles to form a VSI list
2408 * @num_vsi: number of VSI handles in the array
2409 * @vsi_list_id: stores the ID of the VSI list to be created
2410 * @lkup_type: switch rule filter's lookup type
2412 static enum ice_status
2413 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2414 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2416 enum ice_status status;
/* Allocate the VSI list resource from FW first; on success
 * *vsi_list_id holds the ID assigned by FW.
 */
2418 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2419 ice_aqc_opc_alloc_res)
2423 /* Update the newly created VSI list to include the specified VSIs */
2424 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2425 *vsi_list_id, false,
2426 ice_aqc_opc_add_sw_rules, lkup_type);
2430 * ice_create_pkt_fwd_rule
2431 * @hw: pointer to the hardware structure
2432 * @recp_list: corresponding filter management list
2433 * @f_entry: entry containing packet forwarding information
2435 * Create switch rule with given filter information and add an entry
2436 * to the corresponding filter management list to track this switch rule
2439 static enum ice_status
2440 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2441 struct ice_fltr_list_entry *f_entry)
2443 struct ice_fltr_mgmt_list_entry *fm_entry;
2444 struct ice_aqc_sw_rules_elem *s_rule;
2445 enum ice_status status;
2447 s_rule = (struct ice_aqc_sw_rules_elem *)
2448 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2450 return ICE_ERR_NO_MEMORY;
2451 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2452 ice_malloc(hw, sizeof(*fm_entry));
2454 status = ICE_ERR_NO_MEMORY;
2455 goto ice_create_pkt_fwd_rule_exit;
2458 fm_entry->fltr_info = f_entry->fltr_info;
2460 /* Initialize all the fields for the management entry */
2461 fm_entry->vsi_count = 1;
2462 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2463 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2464 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2466 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2467 ice_aqc_opc_add_sw_rules);
2469 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2470 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure, drop the tracking entry before the common exit */
2472 ice_free(hw, fm_entry);
2473 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the rule index it assigned; mirror it into both the
 * caller's entry and the tracking entry.
 */
2476 f_entry->fltr_info.fltr_rule_id =
2477 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2478 fm_entry->fltr_info.fltr_rule_id =
2479 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2481 /* The book keeping entries will get removed when base driver
2482 * calls remove filter AQ command
2484 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2486 ice_create_pkt_fwd_rule_exit:
2487 ice_free(hw, s_rule);
2492 * ice_update_pkt_fwd_rule
2493 * @hw: pointer to the hardware structure
2494 * @f_info: filter information for switch rule
2496 * Call AQ command to update a previously created switch rule with a
2499 static enum ice_status
2500 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2502 struct ice_aqc_sw_rules_elem *s_rule;
2503 enum ice_status status;
2505 s_rule = (struct ice_aqc_sw_rules_elem *)
2506 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2508 return ICE_ERR_NO_MEMORY;
2510 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Reuse the existing rule index so FW updates the rule in place */
2512 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2514 /* Update switch rule with new rule set to forward VSI list */
2515 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2516 ice_aqc_opc_update_sw_rules, NULL);
2518 ice_free(hw, s_rule);
2523 * ice_update_sw_rule_bridge_mode
2524 * @hw: pointer to the HW struct
2526 * Updates unicast switch filter rules based on VEB/VEPA mode
2528 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2530 struct ice_switch_info *sw = hw->switch_info;
2531 struct ice_fltr_mgmt_list_entry *fm_entry;
2532 enum ice_status status = ICE_SUCCESS;
2533 struct LIST_HEAD_TYPE *rule_head;
2534 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2536 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2537 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2539 ice_acquire_lock(rule_lock);
/* Walk every tracked MAC rule while holding the list lock */
2540 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2542 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2543 u8 *addr = fi->l_data.mac.mac_addr;
2545 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action are
 * re-pushed to FW.
 */
2548 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2549 (fi->fltr_act == ICE_FWD_TO_VSI ||
2550 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2551 fi->fltr_act == ICE_FWD_TO_Q ||
2552 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2553 status = ice_update_pkt_fwd_rule(hw, fi);
2559 ice_release_lock(rule_lock);
2565 * ice_add_update_vsi_list
2566 * @hw: pointer to the hardware structure
2567 * @m_entry: pointer to current filter management list entry
2568 * @cur_fltr: filter information from the book keeping entry
2569 * @new_fltr: filter information with the new VSI to be added
2571 * Call AQ command to add or update previously created VSI list with new VSI.
2573 * Helper function to do book keeping associated with adding filter information
2574 * The algorithm to do the book keeping is described below :
2575 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2576 * if only one VSI has been added till now
2577 * Allocate a new VSI list and add two VSIs
2578 * to this list using switch rule command
2579 * Update the previously created switch rule with the
2580 * newly created VSI list ID
2581 * if a VSI list was previously created
2582 * Add the new VSI to the previously created VSI list set
2583 * using the update switch rule command
2585 static enum ice_status
2586 ice_add_update_vsi_list(struct ice_hw *hw,
2587 struct ice_fltr_mgmt_list_entry *m_entry,
2588 struct ice_fltr_info *cur_fltr,
2589 struct ice_fltr_info *new_fltr)
2591 enum ice_status status = ICE_SUCCESS;
2592 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding cannot be shared across VSIs via a
 * VSI list, so these action combinations are rejected.
 */
2594 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2595 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2596 return ICE_ERR_NOT_IMPL;
2598 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2599 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2600 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2601 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2602 return ICE_ERR_NOT_IMPL;
2604 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2605 /* Only one entry existed in the mapping and it was not already
2606 * a part of a VSI list. So, create a VSI list with the old and
2609 struct ice_fltr_info tmp_fltr;
2610 u16 vsi_handle_arr[2];
2612 /* A rule already exists with the new VSI being added */
2613 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2614 return ICE_ERR_ALREADY_EXISTS;
2616 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2617 vsi_handle_arr[1] = new_fltr->vsi_handle;
2618 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2620 new_fltr->lkup_type);
2624 tmp_fltr = *new_fltr;
2625 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2626 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2627 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2628 /* Update the previous switch rule of "MAC forward to VSI" to
2629 * "MAC fwd to VSI list"
2631 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the switch to VSI-list forwarding on the entry */
2635 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2636 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2637 m_entry->vsi_list_info =
2638 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2641 /* If this entry was large action then the large action needs
2642 * to be updated to point to FWD to VSI list
2644 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2646 ice_add_marker_act(hw, m_entry,
2647 m_entry->sw_marker_id,
2648 m_entry->lg_act_idx);
2650 u16 vsi_handle = new_fltr->vsi_handle;
2651 enum ice_adminq_opc opcode;
2653 if (!m_entry->vsi_list_info)
2656 /* A rule already exists with the new VSI being added */
2657 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2660 /* Update the previously created VSI list set with
2661 * the new VSI ID passed in
2663 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2664 opcode = ice_aqc_opc_update_sw_rules;
2666 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2667 vsi_list_id, false, opcode,
2668 new_fltr->lkup_type);
2669 /* update VSI list mapping info with new VSI ID */
2671 ice_set_bit(vsi_handle,
2672 m_entry->vsi_list_info->vsi_map);
/* One more VSI now subscribes to this filter */
2675 m_entry->vsi_count++;
2680 * ice_find_rule_entry - Search a rule entry
2681 * @list_head: head of rule list
2682 * @f_info: rule information
2684 * Helper function to search for a given rule entry
2685 * Returns pointer to entry storing the rule if found
2687 static struct ice_fltr_mgmt_list_entry *
2688 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2689 struct ice_fltr_info *f_info)
2691 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Match on lookup data and direction flag only; action/VSI fields are
 * intentionally not compared.
 */
2693 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2695 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2696 sizeof(f_info->l_data)) &&
2697 f_info->flag == list_itr->fltr_info.flag) {
2706 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2707 * @recp_list: VSI lists needs to be searched
2708 * @vsi_handle: VSI handle to be found in VSI list
2709 * @vsi_list_id: VSI list ID found containing vsi_handle
2711 * Helper function to search a VSI list with single entry containing given VSI
2712 * handle element. This can be extended further to search VSI list with more
2713 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2715 static struct ice_vsi_list_map_info *
2716 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2719 struct ice_vsi_list_map_info *map_info = NULL;
2720 struct LIST_HEAD_TYPE *list_head;
2722 list_head = &recp_list->filt_rules;
/* Advanced recipes track rules with a different entry type, so walk
 * the matching list element type.
 */
2723 if (recp_list->adv_rule) {
2724 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2726 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2727 ice_adv_fltr_mgmt_list_entry,
2729 if (list_itr->vsi_list_info) {
2730 map_info = list_itr->vsi_list_info;
2731 if (ice_is_bit_set(map_info->vsi_map,
2733 *vsi_list_id = map_info->vsi_list_id;
2739 struct ice_fltr_mgmt_list_entry *list_itr;
2741 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2742 ice_fltr_mgmt_list_entry,
/* Legacy path: only single-VSI lists are considered */
2744 if (list_itr->vsi_count == 1 &&
2745 list_itr->vsi_list_info) {
2746 map_info = list_itr->vsi_list_info;
2747 if (ice_is_bit_set(map_info->vsi_map,
2749 *vsi_list_id = map_info->vsi_list_id;
2759 * ice_add_rule_internal - add rule for a given lookup type
2760 * @hw: pointer to the hardware structure
2761 * @recp_list: recipe list for which rule has to be added
2762 * @lport: logic port number on which function add rule
2763 * @f_entry: structure containing MAC forwarding information
2765 * Adds or updates the rule lists for a given recipe
2767 static enum ice_status
2768 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2769 u8 lport, struct ice_fltr_list_entry *f_entry)
2771 struct ice_fltr_info *new_fltr, *cur_fltr;
2772 struct ice_fltr_mgmt_list_entry *m_entry;
2773 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2774 enum ice_status status = ICE_SUCCESS;
2776 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2777 return ICE_ERR_PARAM;
2779 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2780 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2781 f_entry->fltr_info.fwd_id.hw_vsi_id =
2782 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2784 rule_lock = &recp_list->filt_rule_lock;
2786 ice_acquire_lock(rule_lock);
2787 new_fltr = &f_entry->fltr_info;
/* Rx rules are sourced from the logical port; Tx rules from the VSI */
2788 if (new_fltr->flag & ICE_FLTR_RX)
2789 new_fltr->src = lport;
2790 else if (new_fltr->flag & ICE_FLTR_TX)
2792 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No matching rule yet: create a fresh one; otherwise fold this VSI
 * into the existing rule's VSI list.
 */
2794 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2796 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2797 goto exit_add_rule_internal;
2800 cur_fltr = &m_entry->fltr_info;
2801 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2803 exit_add_rule_internal:
2804 ice_release_lock(rule_lock);
2809 * ice_remove_vsi_list_rule
2810 * @hw: pointer to the hardware structure
2811 * @vsi_list_id: VSI list ID generated as part of allocate resource
2812 * @lkup_type: switch rule filter lookup type
2814 * The VSI list should be emptied before this function is called to remove the
2817 static enum ice_status
2818 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2819 enum ice_sw_lkup_type lkup_type)
2821 struct ice_aqc_sw_rules_elem *s_rule;
2822 enum ice_status status;
/* Zero-VSI sized rule: only the list header/index is needed */
2825 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2826 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2828 return ICE_ERR_NO_MEMORY;
2830 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2831 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2833 /* Free the vsi_list resource that we allocated. It is assumed that the
2834 * list is empty at this point.
2836 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2837 ice_aqc_opc_free_res);
2839 ice_free(hw, s_rule);
2844 * ice_rem_update_vsi_list
2845 * @hw: pointer to the hardware structure
2846 * @vsi_handle: VSI handle of the VSI to remove
2847 * @fm_list: filter management entry for which the VSI list management needs to
2850 static enum ice_status
2851 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2852 struct ice_fltr_mgmt_list_entry *fm_list)
2854 enum ice_sw_lkup_type lkup_type;
2855 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules currently forwarding to a VSI list */
2858 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2859 fm_list->vsi_count == 0)
2860 return ICE_ERR_PARAM;
2862 /* A rule with the VSI being removed does not exist */
2863 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2864 return ICE_ERR_DOES_NOT_EXIST;
2866 lkup_type = fm_list->fltr_info.lkup_type;
2867 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove == true) */
2868 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2869 ice_aqc_opc_update_sw_rules,
2874 fm_list->vsi_count--;
2875 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With one non-VLAN VSI remaining, collapse the rule back to a direct
 * forward-to-VSI action for that remaining VSI.
 */
2877 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2878 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2879 struct ice_vsi_list_map_info *vsi_list_info =
2880 fm_list->vsi_list_info;
2883 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2885 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2886 return ICE_ERR_OUT_OF_RANGE;
2888 /* Make sure VSI list is empty before removing it below */
2889 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2891 ice_aqc_opc_update_sw_rules,
2896 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2897 tmp_fltr_info.fwd_id.hw_vsi_id =
2898 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2899 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2900 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2902 ice_debug(hw, ICE_DBG_SW,
2903 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2904 tmp_fltr_info.fwd_id.hw_vsi_id, status);
/* Keep the book keeping in sync with the updated HW rule */
2908 fm_list->fltr_info = tmp_fltr_info;
2911 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2912 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2913 struct ice_vsi_list_map_info *vsi_list_info =
2914 fm_list->vsi_list_info;
2916 /* Remove the VSI list since it is no longer used */
2917 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2919 ice_debug(hw, ICE_DBG_SW,
2920 "Failed to remove VSI list %d, error %d\n",
2921 vsi_list_id, status);
/* Drop the map entry now that the HW list is gone */
2925 LIST_DEL(&vsi_list_info->list_entry);
2926 ice_free(hw, vsi_list_info);
2927 fm_list->vsi_list_info = NULL;
2934 * ice_remove_rule_internal - Remove a filter rule of a given type
2936 * @hw: pointer to the hardware structure
2937 * @recp_list: recipe list for which the rule needs to removed
2938 * @f_entry: rule entry containing filter information
2940 static enum ice_status
2941 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2942 struct ice_fltr_list_entry *f_entry)
2944 struct ice_fltr_mgmt_list_entry *list_elem;
2945 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2946 enum ice_status status = ICE_SUCCESS;
2947 bool remove_rule = false;
2950 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2951 return ICE_ERR_PARAM;
2952 f_entry->fltr_info.fwd_id.hw_vsi_id =
2953 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2955 rule_lock = &recp_list->filt_rule_lock;
2956 ice_acquire_lock(rule_lock);
2957 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2958 &f_entry->fltr_info);
2960 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself must be removed, based on the
 * rule's action and how its VSI list (if any) is shared.
 */
2964 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2966 } else if (!list_elem->vsi_list_info) {
2967 status = ICE_ERR_DOES_NOT_EXIST;
2969 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2970 /* a ref_cnt > 1 indicates that the vsi_list is being
2971 * shared by multiple rules. Decrement the ref_cnt and
2972 * remove this rule, but do not modify the list, as it
2973 * is in-use by other rules.
2975 list_elem->vsi_list_info->ref_cnt--;
2978 /* a ref_cnt of 1 indicates the vsi_list is only used
2979 * by one rule. However, the original removal request is only
2980 * for a single VSI. Update the vsi_list first, and only
2981 * remove the rule if there are no further VSIs in this list.
2983 vsi_handle = f_entry->fltr_info.vsi_handle;
2984 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2987 /* if VSI count goes to zero after updating the VSI list */
2988 if (list_elem->vsi_count == 0)
2993 /* Remove the lookup rule */
2994 struct ice_aqc_sw_rules_elem *s_rule;
/* No dummy ethernet header is needed for a remove request */
2996 s_rule = (struct ice_aqc_sw_rules_elem *)
2997 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2999 status = ICE_ERR_NO_MEMORY;
3003 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3004 ice_aqc_opc_remove_sw_rules);
3006 status = ice_aq_sw_rules(hw, s_rule,
3007 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3008 ice_aqc_opc_remove_sw_rules, NULL);
3010 /* Remove a book keeping from the list */
3011 ice_free(hw, s_rule);
3016 LIST_DEL(&list_elem->list_entry);
3017 ice_free(hw, list_elem);
3020 ice_release_lock(rule_lock);
3025 * ice_aq_get_res_alloc - get allocated resources
3026 * @hw: pointer to the HW struct
3027 * @num_entries: pointer to u16 to store the number of resource entries returned
3028 * @buf: pointer to user-supplied buffer
3029 * @buf_size: size of buff
3030 * @cd: pointer to command details structure or NULL
3032 * The user-supplied buffer must be large enough to store the resource
3033 * information for all resource types. Each resource type is an
3034 * ice_aqc_get_res_resp_data_elem structure.
3037 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3038 u16 buf_size, struct ice_sq_cd *cd)
3040 struct ice_aqc_get_res_alloc *resp;
3041 enum ice_status status;
3042 struct ice_aq_desc desc;
3045 return ICE_ERR_BAD_PTR;
3047 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3048 return ICE_ERR_INVAL_SIZE;
3050 resp = &desc.params.get_res;
3052 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3053 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional: report the count only when requested */
3055 if (!status && num_entries)
3056 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3062 * ice_aq_get_res_descs - get allocated resource descriptors
3063 * @hw: pointer to the hardware structure
3064 * @num_entries: number of resource entries in buffer
3065 * @buf: Indirect buffer to hold data parameters and response
3066 * @buf_size: size of buffer for indirect commands
3067 * @res_type: resource type
3068 * @res_shared: is resource shared
3069 * @desc_id: input - first desc ID to start; output - next desc ID
3070 * @cd: pointer to command details structure or NULL
3073 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3074 struct ice_aqc_get_allocd_res_desc_resp *buf,
3075 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3076 struct ice_sq_cd *cd)
3078 struct ice_aqc_get_allocd_res_desc *cmd;
3079 struct ice_aq_desc desc;
3080 enum ice_status status;
3082 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3084 cmd = &desc.params.get_res_desc;
3087 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements */
3089 if (buf_size != (num_entries * sizeof(*buf)))
3090 return ICE_ERR_PARAM;
3092 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared flag into the command word */
3094 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3095 ICE_AQC_RES_TYPE_M) | (res_shared ?
3096 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3097 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3099 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Hand back the next descriptor ID so callers can iterate */
3101 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3107 * ice_add_mac_rule - Add a MAC address based filter rule
3108 * @hw: pointer to the hardware structure
3109 * @m_list: list of MAC addresses and forwarding information
3110 * @sw: pointer to switch info struct for which function add rule
3111 * @lport: logic port number on which function add rule
3113 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3114 * multiple unicast addresses, the function assumes that all the
3115 * addresses are unique in a given add_mac call. It doesn't
3116 * check for duplicates in this case, removing duplicates from a given
3117 * list should be taken care of in the caller of this function.
3119 static enum ice_status
3120 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3121 struct ice_switch_info *sw, u8 lport)
3123 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3124 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3125 struct ice_fltr_list_entry *m_list_itr;
3126 struct LIST_HEAD_TYPE *rule_head;
3127 u16 elem_sent, total_elem_left;
3128 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3129 enum ice_status status = ICE_SUCCESS;
3130 u16 num_unicast = 0;
3134 rule_lock = &recp_list->filt_rule_lock;
3135 rule_head = &recp_list->filt_rules;
/* Pass 1: validate each entry; multicast (and shared unicast) entries
 * are added individually here, while non-shared unicast entries are
 * counted for a bulk AQ submission below.
 */
3137 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3139 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3143 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3144 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3145 if (!ice_is_vsi_valid(hw, vsi_handle))
3146 return ICE_ERR_PARAM;
3147 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3148 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3149 /* update the src in case it is VSI num */
3150 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3151 return ICE_ERR_PARAM;
3152 m_list_itr->fltr_info.src = hw_vsi_id;
3153 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3154 IS_ZERO_ETHER_ADDR(add))
3155 return ICE_ERR_PARAM;
3156 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3157 /* Don't overwrite the unicast address */
3158 ice_acquire_lock(rule_lock);
3159 if (ice_find_rule_entry(rule_head,
3160 &m_list_itr->fltr_info)) {
3161 ice_release_lock(rule_lock);
3162 return ICE_ERR_ALREADY_EXISTS;
3164 ice_release_lock(rule_lock);
3166 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3167 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3168 m_list_itr->status =
3169 ice_add_rule_internal(hw, recp_list, lport,
3171 if (m_list_itr->status)
3172 return m_list_itr->status;
3176 ice_acquire_lock(rule_lock);
3177 /* Exit if no suitable entries were found for adding bulk switch rule */
3179 status = ICE_SUCCESS;
3180 goto ice_add_mac_exit;
3183 /* Allocate switch rule buffer for the bulk update for unicast */
3184 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3185 s_rule = (struct ice_aqc_sw_rules_elem *)
3186 ice_calloc(hw, num_unicast, s_rule_size);
3188 status = ICE_ERR_NO_MEMORY;
3189 goto ice_add_mac_exit;
/* Pass 2: fill one switch rule per unicast address in the bulk buffer */
3193 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3195 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3196 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3198 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3199 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3200 ice_aqc_opc_add_sw_rules);
3201 r_iter = (struct ice_aqc_sw_rules_elem *)
3202 ((u8 *)r_iter + s_rule_size);
3206 /* Call AQ bulk switch rule update for all unicast addresses */
3208 /* Call AQ switch rule in AQ_MAX chunk */
3209 for (total_elem_left = num_unicast; total_elem_left > 0;
3210 total_elem_left -= elem_sent) {
3211 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ submission at the maximum AQ buffer length */
3213 elem_sent = min(total_elem_left,
3214 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3215 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3216 elem_sent, ice_aqc_opc_add_sw_rules,
3219 goto ice_add_mac_exit;
3220 r_iter = (struct ice_aqc_sw_rules_elem *)
3221 ((u8 *)r_iter + (elem_sent * s_rule_size));
3224 /* Fill up rule ID based on the value returned from FW */
3226 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3228 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3229 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3230 struct ice_fltr_mgmt_list_entry *fm_entry;
3232 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3233 f_info->fltr_rule_id =
3234 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3235 f_info->fltr_act = ICE_FWD_TO_VSI;
3236 /* Create an entry to track this MAC address */
3237 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3238 ice_malloc(hw, sizeof(*fm_entry));
3240 status = ICE_ERR_NO_MEMORY;
3241 goto ice_add_mac_exit;
3243 fm_entry->fltr_info = *f_info;
3244 fm_entry->vsi_count = 1;
3245 /* The book keeping entries will get removed when
3246 * base driver calls remove filter AQ command
3249 LIST_ADD(&fm_entry->list_entry, rule_head);
3250 r_iter = (struct ice_aqc_sw_rules_elem *)
3251 ((u8 *)r_iter + s_rule_size);
3256 ice_release_lock(rule_lock);
3258 ice_free(hw, s_rule);
3263 * ice_add_mac - Add a MAC address based filter rule
3264 * @hw: pointer to the hardware structure
3265 * @m_list: list of MAC addresses and forwarding information
3267 * Function add MAC rule for logical port from HW struct
3270 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3273 return ICE_ERR_PARAM;
/* Delegate to the rule helper using this HW's switch info and the
 * port's logical port number.
 */
3275 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3276 hw->port_info->lport);
3280 * ice_add_vlan_internal - Add one VLAN based filter rule
3281 * @hw: pointer to the hardware structure
3282 * @recp_list: recipe list for which rule has to be added
3283 * @f_entry: filter entry containing one VLAN information
3285 static enum ice_status
3286 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3287 struct ice_fltr_list_entry *f_entry)
3289 struct ice_fltr_mgmt_list_entry *v_list_itr;
3290 struct ice_fltr_info *new_fltr, *cur_fltr;
3291 enum ice_sw_lkup_type lkup_type;
3292 u16 vsi_list_id = 0, vsi_handle;
3293 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3294 enum ice_status status = ICE_SUCCESS;
3296 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3297 return ICE_ERR_PARAM;
3299 f_entry->fltr_info.fwd_id.hw_vsi_id =
3300 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3301 new_fltr = &f_entry->fltr_info;
3303 /* VLAN ID should only be 12 bits */
3304 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3305 return ICE_ERR_PARAM;
3307 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3308 return ICE_ERR_PARAM;
3310 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3311 lkup_type = new_fltr->lkup_type;
3312 vsi_handle = new_fltr->vsi_handle;
3313 rule_lock = &recp_list->filt_rule_lock;
3314 ice_acquire_lock(rule_lock);
3315 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3317 struct ice_vsi_list_map_info *map_info = NULL;
3319 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3320 /* All VLAN pruning rules use a VSI list. Check if
3321 * there is already a VSI list containing VSI that we
3322 * want to add. If found, use the same vsi_list_id for
3323 * this new VLAN rule or else create a new list.
3325 map_info = ice_find_vsi_list_entry(recp_list,
3329 status = ice_create_vsi_list_rule(hw,
3337 /* Convert the action to forwarding to a VSI list. */
3338 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3339 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3342 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3344 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3347 status = ICE_ERR_DOES_NOT_EXIST;
3350 /* reuse VSI list for new rule and increment ref_cnt */
3352 v_list_itr->vsi_list_info = map_info;
3353 map_info->ref_cnt++;
3355 v_list_itr->vsi_list_info =
3356 ice_create_vsi_list_map(hw, &vsi_handle,
3360 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3361 /* Update existing VSI list to add new VSI ID only if it used
3364 cur_fltr = &v_list_itr->fltr_info;
3365 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3368 /* If VLAN rule exists and VSI list being used by this rule is
3369 * referenced by more than 1 VLAN rule. Then create a new VSI
3370 * list appending previous VSI with new VSI and update existing
3371 * VLAN rule to point to new VSI list ID
3373 struct ice_fltr_info tmp_fltr;
3374 u16 vsi_handle_arr[2];
3377 /* Current implementation only supports reusing VSI list with
3378 * one VSI count. We should never hit below condition
3380 if (v_list_itr->vsi_count > 1 &&
3381 v_list_itr->vsi_list_info->ref_cnt > 1) {
3382 ice_debug(hw, ICE_DBG_SW,
3383 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3384 status = ICE_ERR_CFG;
3389 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3392 /* A rule already exists with the new VSI being added */
3393 if (cur_handle == vsi_handle) {
3394 status = ICE_ERR_ALREADY_EXISTS;
3398 vsi_handle_arr[0] = cur_handle;
3399 vsi_handle_arr[1] = vsi_handle;
3400 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3401 &vsi_list_id, lkup_type);
3405 tmp_fltr = v_list_itr->fltr_info;
3406 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3407 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3408 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3409 /* Update the previous switch rule to a new VSI list which
3410 * includes current VSI that is requested
3412 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3416 /* before overriding VSI list map info. decrement ref_cnt of
3419 v_list_itr->vsi_list_info->ref_cnt--;
3421 /* now update to newly created list */
3422 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3423 v_list_itr->vsi_list_info =
3424 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3426 v_list_itr->vsi_count++;
3430 ice_release_lock(rule_lock);
3435 * ice_add_vlan_rule - Add VLAN based filter rule
3436 * @hw: pointer to the hardware structure
3437 * @v_list: list of VLAN entries and forwarding information
3438 * @sw: pointer to switch info struct for which function add rule
3440 static enum ice_status
3441 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3442 struct ice_switch_info *sw)
3444 struct ice_fltr_list_entry *v_list_itr;
3445 struct ice_sw_recipe *recp_list;
3447 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3448 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3450 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3451 return ICE_ERR_PARAM;
3452 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3453 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3455 if (v_list_itr->status)
3456 return v_list_itr->status;
3462 * ice_add_vlan - Add a VLAN based filter rule
3463 * @hw: pointer to the hardware structure
3464 * @v_list: list of VLAN and forwarding information
3466 * Function add VLAN rule for logical port from HW struct
3469 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3472 return ICE_ERR_PARAM;
3474 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3478 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3479 * @hw: pointer to the hardware structure
3480 * @mv_list: list of MAC and VLAN filters
3482 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3483 * pruning bits enabled, then it is the responsibility of the caller to make
3484 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3485 * VLAN won't be received on that VSI otherwise.
3488 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3490 struct ice_fltr_list_entry *mv_list_itr;
3491 struct ice_sw_recipe *recp_list;
3493 if (!mv_list || !hw)
3494 return ICE_ERR_PARAM;
3496 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3497 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3499 enum ice_sw_lkup_type l_type =
3500 mv_list_itr->fltr_info.lkup_type;
3502 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3503 return ICE_ERR_PARAM;
3504 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3505 mv_list_itr->status =
3506 ice_add_rule_internal(hw, recp_list,
3507 hw->port_info->lport,
3509 if (mv_list_itr->status)
3510 return mv_list_itr->status;
3516 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3517 * @hw: pointer to the hardware structure
3518 * @em_list: list of ether type MAC filter, MAC is optional
3519 * @sw: pointer to switch info struct for which function add rule
3520 * @lport: logic port number on which function add rule
3522 * This function requires the caller to populate the entries in
3523 * the filter list with the necessary fields (including flags to
3524 * indicate Tx or Rx rules).
3526 static enum ice_status
3527 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3528 struct ice_switch_info *sw, u8 lport)
3530 struct ice_fltr_list_entry *em_list_itr;
3532 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3534 struct ice_sw_recipe *recp_list;
3535 enum ice_sw_lkup_type l_type;
3537 l_type = em_list_itr->fltr_info.lkup_type;
3538 recp_list = &sw->recp_list[l_type];
3540 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3541 l_type != ICE_SW_LKUP_ETHERTYPE)
3542 return ICE_ERR_PARAM;
3544 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3547 if (em_list_itr->status)
3548 return em_list_itr->status;
3555 * ice_add_eth_mac - Add a ethertype based filter rule
3556 * @hw: pointer to the hardware structure
3557 * @em_list: list of ethertype and forwarding information
3559 * Function add ethertype rule for logical port from HW struct
3561 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3563 if (!em_list || !hw)
3564 return ICE_ERR_PARAM;
3566 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3567 hw->port_info->lport);
3571 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3572 * @hw: pointer to the hardware structure
3573 * @em_list: list of ethertype or ethertype MAC entries
3574 * @sw: pointer to switch info struct for which function add rule
3576 static enum ice_status
3577 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3578 struct ice_switch_info *sw)
3580 struct ice_fltr_list_entry *em_list_itr, *tmp;
3582 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3584 struct ice_sw_recipe *recp_list;
3585 enum ice_sw_lkup_type l_type;
3587 l_type = em_list_itr->fltr_info.lkup_type;
3589 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3590 l_type != ICE_SW_LKUP_ETHERTYPE)
3591 return ICE_ERR_PARAM;
3593 recp_list = &sw->recp_list[l_type];
3594 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3596 if (em_list_itr->status)
3597 return em_list_itr->status;
3603 * ice_remove_eth_mac - remove a ethertype based filter rule
3604 * @hw: pointer to the hardware structure
3605 * @em_list: list of ethertype and forwarding information
3609 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3611 if (!em_list || !hw)
3612 return ICE_ERR_PARAM;
3614 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3618 * ice_rem_sw_rule_info
3619 * @hw: pointer to the hardware structure
3620 * @rule_head: pointer to the switch list structure that we want to delete
3623 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3625 if (!LIST_EMPTY(rule_head)) {
3626 struct ice_fltr_mgmt_list_entry *entry;
3627 struct ice_fltr_mgmt_list_entry *tmp;
3629 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3630 ice_fltr_mgmt_list_entry, list_entry) {
3631 LIST_DEL(&entry->list_entry);
3632 ice_free(hw, entry);
3638 * ice_rem_adv_rule_info
3639 * @hw: pointer to the hardware structure
3640 * @rule_head: pointer to the switch list structure that we want to delete
3643 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3645 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3646 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3648 if (LIST_EMPTY(rule_head))
3651 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3652 ice_adv_fltr_mgmt_list_entry, list_entry) {
3653 LIST_DEL(&lst_itr->list_entry);
3654 ice_free(hw, lst_itr->lkups);
3655 ice_free(hw, lst_itr);
3660 * ice_rem_all_sw_rules_info
3661 * @hw: pointer to the hardware structure
3663 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3665 struct ice_switch_info *sw = hw->switch_info;
3668 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3669 struct LIST_HEAD_TYPE *rule_head;
3671 rule_head = &sw->recp_list[i].filt_rules;
3672 if (!sw->recp_list[i].adv_rule)
3673 ice_rem_sw_rule_info(hw, rule_head);
3675 ice_rem_adv_rule_info(hw, rule_head);
3680 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3681 * @pi: pointer to the port_info structure
3682 * @vsi_handle: VSI handle to set as default
3683 * @set: true to add the above mentioned switch rule, false to remove it
3684 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3686 * add filter rule to set/unset given VSI as default VSI for the switch
3687 * (represented by swid)
3690 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3693 struct ice_aqc_sw_rules_elem *s_rule;
3694 struct ice_fltr_info f_info;
3695 struct ice_hw *hw = pi->hw;
3696 enum ice_adminq_opc opcode;
3697 enum ice_status status;
3701 if (!ice_is_vsi_valid(hw, vsi_handle))
3702 return ICE_ERR_PARAM;
3703 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3705 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3706 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3707 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3709 return ICE_ERR_NO_MEMORY;
3711 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3713 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3714 f_info.flag = direction;
3715 f_info.fltr_act = ICE_FWD_TO_VSI;
3716 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3718 if (f_info.flag & ICE_FLTR_RX) {
3719 f_info.src = pi->lport;
3720 f_info.src_id = ICE_SRC_ID_LPORT;
3722 f_info.fltr_rule_id =
3723 pi->dflt_rx_vsi_rule_id;
3724 } else if (f_info.flag & ICE_FLTR_TX) {
3725 f_info.src_id = ICE_SRC_ID_VSI;
3726 f_info.src = hw_vsi_id;
3728 f_info.fltr_rule_id =
3729 pi->dflt_tx_vsi_rule_id;
3733 opcode = ice_aqc_opc_add_sw_rules;
3735 opcode = ice_aqc_opc_remove_sw_rules;
3737 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3739 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3740 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3743 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3745 if (f_info.flag & ICE_FLTR_TX) {
3746 pi->dflt_tx_vsi_num = hw_vsi_id;
3747 pi->dflt_tx_vsi_rule_id = index;
3748 } else if (f_info.flag & ICE_FLTR_RX) {
3749 pi->dflt_rx_vsi_num = hw_vsi_id;
3750 pi->dflt_rx_vsi_rule_id = index;
3753 if (f_info.flag & ICE_FLTR_TX) {
3754 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3755 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3756 } else if (f_info.flag & ICE_FLTR_RX) {
3757 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3758 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3763 ice_free(hw, s_rule);
3768 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3769 * @list_head: head of rule list
3770 * @f_info: rule information
3772 * Helper function to search for a unicast rule entry - this is to be used
3773 * to remove unicast MAC filter that is not shared with other VSIs on the
3776 * Returns pointer to entry storing the rule if found
3778 static struct ice_fltr_mgmt_list_entry *
3779 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3780 struct ice_fltr_info *f_info)
3782 struct ice_fltr_mgmt_list_entry *list_itr;
3784 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3786 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3787 sizeof(f_info->l_data)) &&
3788 f_info->fwd_id.hw_vsi_id ==
3789 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3790 f_info->flag == list_itr->fltr_info.flag)
3797 * ice_remove_mac_rule - remove a MAC based filter rule
3798 * @hw: pointer to the hardware structure
3799 * @m_list: list of MAC addresses and forwarding information
3800 * @recp_list: list from which function remove MAC address
3802 * This function removes either a MAC filter rule or a specific VSI from a
3803 * VSI list for a multicast MAC address.
3805 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3806 * ice_add_mac. Caller should be aware that this call will only work if all
3807 * the entries passed into m_list were added previously. It will not attempt to
3808 * do a partial remove of entries that were found.
3810 static enum ice_status
3811 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3812 struct ice_sw_recipe *recp_list)
3814 struct ice_fltr_list_entry *list_itr, *tmp;
3815 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3818 return ICE_ERR_PARAM;
3820 rule_lock = &recp_list->filt_rule_lock;
3821 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3823 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3824 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3827 if (l_type != ICE_SW_LKUP_MAC)
3828 return ICE_ERR_PARAM;
3830 vsi_handle = list_itr->fltr_info.vsi_handle;
3831 if (!ice_is_vsi_valid(hw, vsi_handle))
3832 return ICE_ERR_PARAM;
3834 list_itr->fltr_info.fwd_id.hw_vsi_id =
3835 ice_get_hw_vsi_num(hw, vsi_handle);
3836 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3837 /* Don't remove the unicast address that belongs to
3838 * another VSI on the switch, since it is not being
3841 ice_acquire_lock(rule_lock);
3842 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3843 &list_itr->fltr_info)) {
3844 ice_release_lock(rule_lock);
3845 return ICE_ERR_DOES_NOT_EXIST;
3847 ice_release_lock(rule_lock);
3849 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3851 if (list_itr->status)
3852 return list_itr->status;
3858 * ice_remove_mac - remove a MAC address based filter rule
3859 * @hw: pointer to the hardware structure
3860 * @m_list: list of MAC addresses and forwarding information
3864 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3866 struct ice_sw_recipe *recp_list;
3868 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3869 return ice_remove_mac_rule(hw, m_list, recp_list);
3873 * ice_remove_vlan_rule - Remove VLAN based filter rule
3874 * @hw: pointer to the hardware structure
3875 * @v_list: list of VLAN entries and forwarding information
3876 * @recp_list: list from which function remove VLAN
3878 static enum ice_status
3879 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3880 struct ice_sw_recipe *recp_list)
3882 struct ice_fltr_list_entry *v_list_itr, *tmp;
3884 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3886 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3888 if (l_type != ICE_SW_LKUP_VLAN)
3889 return ICE_ERR_PARAM;
3890 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3892 if (v_list_itr->status)
3893 return v_list_itr->status;
3899 * ice_remove_vlan - remove a VLAN address based filter rule
3900 * @hw: pointer to the hardware structure
3901 * @v_list: list of VLAN and forwarding information
3905 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3907 struct ice_sw_recipe *recp_list;
3910 return ICE_ERR_PARAM;
3912 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3913 return ice_remove_vlan_rule(hw, v_list, recp_list);
3917 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3918 * @hw: pointer to the hardware structure
3919 * @v_list: list of MAC VLAN entries and forwarding information
3922 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3924 struct ice_fltr_list_entry *v_list_itr, *tmp;
3925 struct ice_sw_recipe *recp_list;
3928 return ICE_ERR_PARAM;
3930 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3931 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3933 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3935 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3936 return ICE_ERR_PARAM;
3937 v_list_itr->status =
3938 ice_remove_rule_internal(hw, recp_list,
3940 if (v_list_itr->status)
3941 return v_list_itr->status;
3947 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3948 * @fm_entry: filter entry to inspect
3949 * @vsi_handle: VSI handle to compare with filter info
3952 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3954 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3955 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3956 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3957 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3962 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3963 * @hw: pointer to the hardware structure
3964 * @vsi_handle: VSI handle to remove filters from
3965 * @vsi_list_head: pointer to the list to add entry to
3966 * @fi: pointer to fltr_info of filter entry to copy & add
3968 * Helper function, used when creating a list of filters to remove from
3969 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3970 * original filter entry, with the exception of fltr_info.fltr_act and
3971 * fltr_info.fwd_id fields. These are set such that later logic can
3972 * extract which VSI to remove the fltr from, and pass on that information.
3974 static enum ice_status
3975 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3976 struct LIST_HEAD_TYPE *vsi_list_head,
3977 struct ice_fltr_info *fi)
3979 struct ice_fltr_list_entry *tmp;
3981 /* this memory is freed up in the caller function
3982 * once filters for this VSI are removed
3984 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3986 return ICE_ERR_NO_MEMORY;
3988 tmp->fltr_info = *fi;
3990 /* Overwrite these fields to indicate which VSI to remove filter from,
3991 * so find and remove logic can extract the information from the
3992 * list entries. Note that original entries will still have proper
3995 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3996 tmp->fltr_info.vsi_handle = vsi_handle;
3997 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3999 LIST_ADD(&tmp->list_entry, vsi_list_head);
4005 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4006 * @hw: pointer to the hardware structure
4007 * @vsi_handle: VSI handle to remove filters from
4008 * @lkup_list_head: pointer to the list that has certain lookup type filters
4009 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4011 * Locates all filters in lkup_list_head that are used by the given VSI,
4012 * and adds COPIES of those entries to vsi_list_head (intended to be used
4013 * to remove the listed filters).
4014 * Note that this means all entries in vsi_list_head must be explicitly
4015 * deallocated by the caller when done with list.
4017 static enum ice_status
4018 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4019 struct LIST_HEAD_TYPE *lkup_list_head,
4020 struct LIST_HEAD_TYPE *vsi_list_head)
4022 struct ice_fltr_mgmt_list_entry *fm_entry;
4023 enum ice_status status = ICE_SUCCESS;
4025 /* check to make sure VSI ID is valid and within boundary */
4026 if (!ice_is_vsi_valid(hw, vsi_handle))
4027 return ICE_ERR_PARAM;
4029 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4030 ice_fltr_mgmt_list_entry, list_entry) {
4031 struct ice_fltr_info *fi;
4033 fi = &fm_entry->fltr_info;
4034 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4037 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4046 * ice_determine_promisc_mask
4047 * @fi: filter info to parse
4049 * Helper function to determine which ICE_PROMISC_ mask corresponds
4050 * to given filter into.
4052 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4054 u16 vid = fi->l_data.mac_vlan.vlan_id;
4055 u8 *macaddr = fi->l_data.mac.mac_addr;
4056 bool is_tx_fltr = false;
4057 u8 promisc_mask = 0;
4059 if (fi->flag == ICE_FLTR_TX)
4062 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4063 promisc_mask |= is_tx_fltr ?
4064 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4065 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4066 promisc_mask |= is_tx_fltr ?
4067 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4068 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4069 promisc_mask |= is_tx_fltr ?
4070 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4072 promisc_mask |= is_tx_fltr ?
4073 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4075 return promisc_mask;
4079 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4080 * @hw: pointer to the hardware structure
4081 * @vsi_handle: VSI handle to retrieve info from
4082 * @promisc_mask: pointer to mask to be filled in
4083 * @vid: VLAN ID of promisc VLAN VSI
4086 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4089 struct ice_switch_info *sw = hw->switch_info;
4090 struct ice_fltr_mgmt_list_entry *itr;
4091 struct LIST_HEAD_TYPE *rule_head;
4092 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4094 if (!ice_is_vsi_valid(hw, vsi_handle))
4095 return ICE_ERR_PARAM;
4099 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4100 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4102 ice_acquire_lock(rule_lock);
4103 LIST_FOR_EACH_ENTRY(itr, rule_head,
4104 ice_fltr_mgmt_list_entry, list_entry) {
4105 /* Continue if this filter doesn't apply to this VSI or the
4106 * VSI ID is not in the VSI map for this filter
4108 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4111 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4113 ice_release_lock(rule_lock);
4119 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4120 * @hw: pointer to the hardware structure
4121 * @vsi_handle: VSI handle to retrieve info from
4122 * @promisc_mask: pointer to mask to be filled in
4123 * @vid: VLAN ID of promisc VLAN VSI
4126 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4129 struct ice_switch_info *sw = hw->switch_info;
4130 struct ice_fltr_mgmt_list_entry *itr;
4131 struct LIST_HEAD_TYPE *rule_head;
4132 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4134 if (!ice_is_vsi_valid(hw, vsi_handle))
4135 return ICE_ERR_PARAM;
4139 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4140 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4142 ice_acquire_lock(rule_lock);
4143 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4145 /* Continue if this filter doesn't apply to this VSI or the
4146 * VSI ID is not in the VSI map for this filter
4148 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4151 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4153 ice_release_lock(rule_lock);
4159 * ice_remove_promisc - Remove promisc based filter rules
4160 * @hw: pointer to the hardware structure
4161 * @recp_id: recipe ID for which the rule needs to removed
4162 * @v_list: list of promisc entries
4164 static enum ice_status
4165 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4166 struct LIST_HEAD_TYPE *v_list)
4168 struct ice_fltr_list_entry *v_list_itr, *tmp;
4169 struct ice_sw_recipe *recp_list;
4171 recp_list = &hw->switch_info->recp_list[recp_id];
4172 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4174 v_list_itr->status =
4175 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4176 if (v_list_itr->status)
4177 return v_list_itr->status;
4183 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4184 * @hw: pointer to the hardware structure
4185 * @vsi_handle: VSI handle to clear mode
4186 * @promisc_mask: mask of promiscuous config bits to clear
4187 * @vid: VLAN ID to clear VLAN promiscuous
4190 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4193 struct ice_switch_info *sw = hw->switch_info;
4194 struct ice_fltr_list_entry *fm_entry, *tmp;
4195 struct LIST_HEAD_TYPE remove_list_head;
4196 struct ice_fltr_mgmt_list_entry *itr;
4197 struct LIST_HEAD_TYPE *rule_head;
4198 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4199 enum ice_status status = ICE_SUCCESS;
4202 if (!ice_is_vsi_valid(hw, vsi_handle))
4203 return ICE_ERR_PARAM;
4205 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4206 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4208 recipe_id = ICE_SW_LKUP_PROMISC;
4210 rule_head = &sw->recp_list[recipe_id].filt_rules;
4211 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4213 INIT_LIST_HEAD(&remove_list_head);
4215 ice_acquire_lock(rule_lock);
4216 LIST_FOR_EACH_ENTRY(itr, rule_head,
4217 ice_fltr_mgmt_list_entry, list_entry) {
4218 struct ice_fltr_info *fltr_info;
4219 u8 fltr_promisc_mask = 0;
4221 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4223 fltr_info = &itr->fltr_info;
4225 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4226 vid != fltr_info->l_data.mac_vlan.vlan_id)
4229 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4231 /* Skip if filter is not completely specified by given mask */
4232 if (fltr_promisc_mask & ~promisc_mask)
4235 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4239 ice_release_lock(rule_lock);
4240 goto free_fltr_list;
4243 ice_release_lock(rule_lock);
4245 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4248 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4249 ice_fltr_list_entry, list_entry) {
4250 LIST_DEL(&fm_entry->list_entry);
4251 ice_free(hw, fm_entry);
4258 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4259 * @hw: pointer to the hardware structure
4260 * @vsi_handle: VSI handle to configure
4261 * @promisc_mask: mask of promiscuous config bits
4262 * @vid: VLAN ID to set VLAN promiscuous
4265 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4267 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4268 struct ice_fltr_list_entry f_list_entry;
4269 struct ice_fltr_info new_fltr;
4270 enum ice_status status = ICE_SUCCESS;
4276 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4278 if (!ice_is_vsi_valid(hw, vsi_handle))
4279 return ICE_ERR_PARAM;
4280 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4282 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4284 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4285 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4286 new_fltr.l_data.mac_vlan.vlan_id = vid;
4287 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4289 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4290 recipe_id = ICE_SW_LKUP_PROMISC;
4293 /* Separate filters must be set for each direction/packet type
4294 * combination, so we will loop over the mask value, store the
4295 * individual type, and clear it out in the input mask as it
4298 while (promisc_mask) {
4299 struct ice_sw_recipe *recp_list;
4305 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4306 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4307 pkt_type = UCAST_FLTR;
4308 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4309 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4310 pkt_type = UCAST_FLTR;
4312 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4313 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4314 pkt_type = MCAST_FLTR;
4315 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4316 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4317 pkt_type = MCAST_FLTR;
4319 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4320 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4321 pkt_type = BCAST_FLTR;
4322 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4323 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4324 pkt_type = BCAST_FLTR;
4328 /* Check for VLAN promiscuous flag */
4329 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4330 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4331 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4332 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4336 /* Set filter DA based on packet type */
4337 mac_addr = new_fltr.l_data.mac.mac_addr;
4338 if (pkt_type == BCAST_FLTR) {
4339 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4340 } else if (pkt_type == MCAST_FLTR ||
4341 pkt_type == UCAST_FLTR) {
4342 /* Use the dummy ether header DA */
4343 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4344 ICE_NONDMA_TO_NONDMA);
4345 if (pkt_type == MCAST_FLTR)
4346 mac_addr[0] |= 0x1; /* Set multicast bit */
4349 /* Need to reset this to zero for all iterations */
4352 new_fltr.flag |= ICE_FLTR_TX;
4353 new_fltr.src = hw_vsi_id;
4355 new_fltr.flag |= ICE_FLTR_RX;
4356 new_fltr.src = hw->port_info->lport;
4359 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4360 new_fltr.vsi_handle = vsi_handle;
4361 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4362 f_list_entry.fltr_info = new_fltr;
4363 recp_list = &hw->switch_info->recp_list[recipe_id];
4365 status = ice_add_rule_internal(hw, recp_list,
4366 hw->port_info->lport,
4368 if (status != ICE_SUCCESS)
4369 goto set_promisc_exit;
4377 * ice_set_vlan_vsi_promisc
4378 * @hw: pointer to the hardware structure
4379 * @vsi_handle: VSI handle to configure
4380 * @promisc_mask: mask of promiscuous config bits
4381 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4383 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4386 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4387 bool rm_vlan_promisc)
4389 struct ice_switch_info *sw = hw->switch_info;
4390 struct ice_fltr_list_entry *list_itr, *tmp;
4391 struct LIST_HEAD_TYPE vsi_list_head;
4392 struct LIST_HEAD_TYPE *vlan_head;
4393 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4394 enum ice_status status;
4397 INIT_LIST_HEAD(&vsi_list_head);
4398 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4399 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4400 ice_acquire_lock(vlan_lock);
4401 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4403 ice_release_lock(vlan_lock);
4405 goto free_fltr_list;
4407 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4409 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4410 if (rm_vlan_promisc)
4411 status = ice_clear_vsi_promisc(hw, vsi_handle,
4412 promisc_mask, vlan_id);
4414 status = ice_set_vsi_promisc(hw, vsi_handle,
4415 promisc_mask, vlan_id);
4421 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4422 ice_fltr_list_entry, list_entry) {
4423 LIST_DEL(&list_itr->list_entry);
4424 ice_free(hw, list_itr);
4430 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4431 * @hw: pointer to the hardware structure
4432 * @vsi_handle: VSI handle to remove filters from
4433 * @recp_list: recipe list from which function remove fltr
4434 * @lkup: switch rule filter lookup type
/* NOTE(review): extraction appears to have dropped lines in this function
 * (opening/closing braces, the switch statement header, its break statements
 * and the early-exit on status) -- verify against upstream ice_switch.c.
 */
4437 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4438 struct ice_sw_recipe *recp_list,
4439 enum ice_sw_lkup_type lkup)
4441 struct ice_fltr_list_entry *fm_entry;
4442 struct LIST_HEAD_TYPE remove_list_head;
4443 struct LIST_HEAD_TYPE *rule_head;
4444 struct ice_fltr_list_entry *tmp;
4445 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4446 enum ice_status status;
/* Snapshot this VSI's rules of the requested lookup type into a private
 * removal list while holding the recipe's rule-list lock.
 */
4448 INIT_LIST_HEAD(&remove_list_head);
4449 rule_lock = &recp_list[lkup].filt_rule_lock;
4450 rule_head = &recp_list[lkup].filt_rules;
4451 ice_acquire_lock(rule_lock);
4452 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4454 ice_release_lock(rule_lock);
/* Dispatch the actual removal on the lookup type of the collected rules */
4459 case ICE_SW_LKUP_MAC:
4460 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4462 case ICE_SW_LKUP_VLAN:
4463 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4465 case ICE_SW_LKUP_PROMISC:
4466 case ICE_SW_LKUP_PROMISC_VLAN:
4467 ice_remove_promisc(hw, lkup, &remove_list_head);
4469 case ICE_SW_LKUP_MAC_VLAN:
4470 ice_remove_mac_vlan(hw, &remove_list_head);
4472 case ICE_SW_LKUP_ETHERTYPE:
4473 case ICE_SW_LKUP_ETHERTYPE_MAC:
4474 ice_remove_eth_mac(hw, &remove_list_head);
4476 case ICE_SW_LKUP_DFLT:
4477 ice_debug(hw, ICE_DBG_SW,
4478 "Remove filters for this lookup type hasn't been implemented yet\n");
4480 case ICE_SW_LKUP_LAST:
4481 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary list entries built by ice_add_to_vsi_fltr_list */
4485 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4486 ice_fltr_list_entry, list_entry) {
4487 LIST_DEL(&fm_entry->list_entry);
4488 ice_free(hw, fm_entry);
4493 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4494 * @hw: pointer to the hardware structure
4495 * @vsi_handle: VSI handle to remove filters from
4496 * @sw: pointer to switch info struct
4499 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4500 struct ice_switch_info *sw)
4502 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4504 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4505 sw->recp_list, ICE_SW_LKUP_MAC);
4506 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4507 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4508 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4509 sw->recp_list, ICE_SW_LKUP_PROMISC);
4510 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4511 sw->recp_list, ICE_SW_LKUP_VLAN);
4512 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4513 sw->recp_list, ICE_SW_LKUP_DFLT);
4514 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4515 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4516 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4517 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4518 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4519 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4523 * ice_remove_vsi_fltr - Remove all filters for a VSI
4524 * @hw: pointer to the hardware structure
4525 * @vsi_handle: VSI handle to remove filters from
4527 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4529 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4533 * ice_alloc_res_cntr - allocating resource counter
4534 * @hw: pointer to the hardware structure
4535 * @type: type of resource
4536 * @alloc_shared: if set it is shared else dedicated
4537 * @num_items: number of entries requested for FD resource type
4538 * @counter_id: counter index returned by AQ call
/* NOTE(review): extraction dropped lines here (return type, buf_len
 * declaration, the NULL check body, the error/cleanup path and the final
 * free/return) -- verify against upstream ice_switch.c.
 */
4541 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4544 struct ice_aqc_alloc_free_res_elem *buf;
4545 enum ice_status status;
4548 /* Allocate resource */
4549 buf_len = sizeof(*buf);
4550 buf = (struct ice_aqc_alloc_free_res_elem *)
4551 ice_malloc(hw, buf_len);
4553 return ICE_ERR_NO_MEMORY;
/* One AQ element describing num_items entries of the requested type;
 * alloc_shared selects the shared vs dedicated flag bits.
 */
4555 buf->num_elems = CPU_TO_LE16(num_items);
4556 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4557 ICE_AQC_RES_TYPE_M) | alloc_shared);
4559 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4560 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in the first element */
4564 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4572 * ice_free_res_cntr - free resource counter
4573 * @hw: pointer to the hardware structure
4574 * @type: type of resource
4575 * @alloc_shared: if set it is shared else dedicated
4576 * @num_items: number of entries to be freed for FD resource type
4577 * @counter_id: counter ID resource which needs to be freed
/* NOTE(review): extraction dropped lines here (return type, buf_len
 * declaration, NULL-check body, the status check guarding the debug print,
 * and the free/return tail) -- verify against upstream ice_switch.c.
 */
4580 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4583 struct ice_aqc_alloc_free_res_elem *buf;
4584 enum ice_status status;
/* Build the free-resource AQ request mirroring ice_alloc_res_cntr */
4588 buf_len = sizeof(*buf);
4589 buf = (struct ice_aqc_alloc_free_res_elem *)
4590 ice_malloc(hw, buf_len);
4592 return ICE_ERR_NO_MEMORY;
4594 buf->num_elems = CPU_TO_LE16(num_items);
4595 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4596 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Tell FW which counter entry to release */
4597 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4599 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4600 ice_aqc_opc_free_res, NULL);
/* presumably guarded by "if (status)" (dropped line) -- failure is logged */
4602 ice_debug(hw, ICE_DBG_SW,
4603 "counter resource could not be freed\n");
4610 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4611 * @hw: pointer to the hardware structure
4612 * @counter_id: returns counter index
4614 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4616 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4617 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4622 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4623 * @hw: pointer to the hardware structure
4624 * @counter_id: counter index to be freed
4626 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4628 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4629 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4634 * ice_alloc_res_lg_act - add large action resource
4635 * @hw: pointer to the hardware structure
4636 * @l_id: large action ID to fill it in
4637 * @num_acts: number of actions to hold with a large action entry
4639 static enum ice_status
4640 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4642 struct ice_aqc_alloc_free_res_elem *sw_buf;
4643 enum ice_status status;
/* Reject zero or over-limit action counts before touching any resources */
4646 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4647 return ICE_ERR_PARAM;
4649 /* Allocate resource for large action */
4650 buf_len = sizeof(*sw_buf);
4651 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4652 ice_malloc(hw, buf_len);
4654 return ICE_ERR_NO_MEMORY;
4656 sw_buf->num_elems = CPU_TO_LE16(1);
4658 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4659 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4660 * If num_acts is greater than 2, then use
4661 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4662 * The num_acts cannot exceed 4. This was ensured at the
4663 * beginning of the function.
/* NOTE(review): the comment above previously said WIDE_TABLE_3 for two
 * actions while the code below allocates WIDE_TABLE_2; updated the comment
 * to match the code -- confirm against the resource-type definitions.
 */
4666 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4667 else if (num_acts == 2)
4668 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4670 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4672 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4673 ice_aqc_opc_alloc_res, NULL);
/* presumably guarded by "if (!status)" (dropped line): FW returns the
 * large-action table index in the first element.
 */
4675 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4677 ice_free(hw, sw_buf);
4682 * ice_add_mac_with_sw_marker - add filter with sw marker
4683 * @hw: pointer to the hardware structure
4684 * @f_info: filter info structure containing the MAC filter information
4685 * @sw_marker: sw marker to tag the Rx descriptor with
/* NOTE(review): extraction dropped lines in this function (return type,
 * declarations of entry_exists/lg_act_id, braces, goto labels and the
 * final return) -- verify against upstream ice_switch.c.
 */
4688 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4691 struct ice_fltr_mgmt_list_entry *m_entry;
4692 struct ice_fltr_list_entry fl_info;
4693 struct ice_sw_recipe *recp_list;
4694 struct LIST_HEAD_TYPE l_head;
4695 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4696 enum ice_status ret;
/* Validate: only VSI-forwarding MAC filters with a real marker and a
 * valid VSI handle are accepted.
 */
4700 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4701 return ICE_ERR_PARAM;
4703 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4704 return ICE_ERR_PARAM;
4706 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4707 return ICE_ERR_PARAM;
4709 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4710 return ICE_ERR_PARAM;
4711 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4713 /* Add filter if it doesn't exist so then the adding of large
4714 * action always results in update
4717 INIT_LIST_HEAD(&l_head);
4718 fl_info.fltr_info = *f_info;
4719 LIST_ADD(&fl_info.list_entry, &l_head);
4721 entry_exists = false;
4722 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4723 hw->port_info->lport);
4724 if (ret == ICE_ERR_ALREADY_EXISTS)
4725 entry_exists = true;
4729 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4730 rule_lock = &recp_list->filt_rule_lock;
4731 ice_acquire_lock(rule_lock);
4732 /* Get the book keeping entry for the filter */
4733 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4737 /* If counter action was enabled for this rule then don't enable
4738 * sw marker large action
4740 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4741 ret = ICE_ERR_PARAM;
4745 /* if same marker was added before */
4746 if (m_entry->sw_marker_id == sw_marker) {
4747 ret = ICE_ERR_ALREADY_EXISTS;
4751 /* Allocate a hardware table entry to hold large act. Three actions
4752 * for marker based large action
4754 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4758 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4761 /* Update the switch rule to add the marker action */
4762 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4764 ice_release_lock(rule_lock);
/* Error/unwind path: drop the lock and, if we created the filter above,
 * remove it again so a failed marker add leaves no residue.
 */
4769 ice_release_lock(rule_lock);
4770 /* only remove entry if it did not exist previously */
4772 ret = ice_remove_mac(hw, &l_head);
4778 * ice_add_mac_with_counter - add filter with counter enabled
4779 * @hw: pointer to the hardware structure
4780 * @f_info: pointer to filter info structure containing the MAC filter
/* NOTE(review): extraction dropped lines in this function (return type,
 * counter_id/lg_act_id/entry_exist declarations, braces, goto labels and
 * the final return) -- verify against upstream ice_switch.c.
 */
4784 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4786 struct ice_fltr_mgmt_list_entry *m_entry;
4787 struct ice_fltr_list_entry fl_info;
4788 struct ice_sw_recipe *recp_list;
4789 struct LIST_HEAD_TYPE l_head;
4790 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4791 enum ice_status ret;
/* Validate: only VSI-forwarding MAC filters on a valid VSI are accepted */
4796 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4797 return ICE_ERR_PARAM;
4799 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4800 return ICE_ERR_PARAM;
4802 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4803 return ICE_ERR_PARAM;
4804 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4805 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4807 entry_exist = false;
4809 rule_lock = &recp_list->filt_rule_lock;
4811 /* Add filter if it doesn't exist so then the adding of large
4812 * action always results in update
4814 INIT_LIST_HEAD(&l_head);
4816 fl_info.fltr_info = *f_info;
4817 LIST_ADD(&fl_info.list_entry, &l_head);
4819 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4820 hw->port_info->lport);
4821 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Look up the bookkeeping entry for the (possibly pre-existing) filter */
4826 ice_acquire_lock(rule_lock);
4827 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4829 ret = ICE_ERR_BAD_PTR;
4833 /* Don't enable counter for a filter for which sw marker was enabled */
4834 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4835 ret = ICE_ERR_PARAM;
4839 /* If a counter was already enabled then don't need to add again */
4840 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4841 ret = ICE_ERR_ALREADY_EXISTS;
4845 /* Allocate a hardware table entry to VLAN counter */
4846 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4850 /* Allocate a hardware table entry to hold large act. Two actions for
4851 * counter based large action
4853 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4857 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4860 /* Update the switch rule to add the counter action */
4861 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4863 ice_release_lock(rule_lock);
/* Error/unwind path: drop the lock and remove the filter only if this
 * call created it (it did not exist beforehand).
 */
4868 ice_release_lock(rule_lock);
4869 /* only remove entry if it did not exist previously */
4871 ret = ice_remove_mac(hw, &l_head);
4876 /* This is mapping table entry that maps every word within a given protocol
4877 * structure to the real byte offset as per the specification of that
4879 * for example dst address is 3 words in ethertype header and corresponding
4880 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4881 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4882 * matching entry describing its field. This needs to be updated if new
4883 * structure is added to that union.
/* NOTE(review): the example offsets above do not match the MAC rows below
 * (dst-address words sit at byte offsets 0/2/4 and src at 6/8/10 per the
 * ICE_MAC_OFOS entry) -- likely a stale comment; verify before relying on it.
 * Each entry lists the byte offset of every 16-bit matchable word in that
 * protocol header. The tunnel rows (VXLAN/GENEVE/GTP) start at offset 8 --
 * presumably skipping leading fields of the tunnel header struct; confirm
 * against the ice_prot_hdr union definitions.
 */
4885 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4886 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4887 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4888 { ICE_ETYPE_OL, { 0 } },
4889 { ICE_VLAN_OFOS, { 0, 2 } },
4890 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4891 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4892 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4893 26, 28, 30, 32, 34, 36, 38 } },
4894 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4895 26, 28, 30, 32, 34, 36, 38 } },
4896 { ICE_TCP_IL, { 0, 2 } },
4897 { ICE_UDP_OF, { 0, 2 } },
4898 { ICE_UDP_ILOS, { 0, 2 } },
4899 { ICE_SCTP_IL, { 0, 2 } },
4900 { ICE_VXLAN, { 8, 10, 12, 14 } },
4901 { ICE_GENEVE, { 8, 10, 12, 14 } },
4902 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4903 { ICE_NVGRE, { 0, 2, 4, 6 } },
4904 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4905 { ICE_PPPOE, { 0, 2, 4, 6 } },
4908 /* The following table describes preferred grouping of recipes.
4909 * If a recipe that needs to be programmed is a superset or matches one of the
4910 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Note the UDP-based tunnels (VXLAN, GENEVE, VXLAN-GPE, GTP)
 * all share ICE_UDP_OF_HW, and NVGRE uses the GRE hardware ID.
 */
4914 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4915 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4916 { ICE_MAC_IL, ICE_MAC_IL_HW },
4917 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4918 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4919 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4920 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4921 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4922 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4923 { ICE_TCP_IL, ICE_TCP_IL_HW },
4924 { ICE_UDP_OF, ICE_UDP_OF_HW },
4925 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4926 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4927 { ICE_VXLAN, ICE_UDP_OF_HW },
4928 { ICE_GENEVE, ICE_UDP_OF_HW },
4929 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4930 { ICE_NVGRE, ICE_GRE_OF_HW },
4931 { ICE_GTP, ICE_UDP_OF_HW },
4932 { ICE_PPPOE, ICE_PPPOE_HW },
4936 * ice_find_recp - find a recipe
4937 * @hw: pointer to the hardware structure
4938 * @lkup_exts: extension sequence to match
4940 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* NOTE(review): extraction dropped lines in this function (declarations of
 * i/p/q and the "found" flag handling, inner-loop break, closing braces) --
 * verify against upstream ice_switch.c.
 */
4942 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4944 bool refresh_required = true;
4945 struct ice_sw_recipe *recp;
4948 /* Walk through existing recipes to find a match */
4949 recp = hw->switch_info->recp_list;
4950 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4951 /* If recipe was not created for this ID, in SW bookkeeping,
4952 * check if FW has an entry for this recipe. If the FW has an
4953 * entry update it in our SW bookkeeping and continue with the
4956 if (!recp[i].recp_created)
4957 if (ice_get_recp_frm_fw(hw,
4958 hw->switch_info->recp_list, i,
4962 /* Skip inverse action recipes */
4963 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4964 ICE_AQ_RECIPE_ACT_INV_ACT)
4967 /* if number of words we are looking for match */
4968 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4969 struct ice_fv_word *a = lkup_exts->fv_words;
4970 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: every requested (prot_id, off)
 * word must appear somewhere in the candidate recipe's words.
 */
4974 for (p = 0; p < lkup_exts->n_val_words; p++) {
4975 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4977 if (a[p].off == b[q].off &&
4978 a[p].prot_id == b[q].prot_id)
4979 /* Found the "p"th word in the
4984 /* After walking through all the words in the
4985 * "i"th recipe if "p"th word was not found then
4986 * this recipe is not what we are looking for.
4987 * So break out from this loop and try the next
4990 if (q >= recp[i].lkup_exts.n_val_words) {
4995 /* If for "i"th recipe the found was never set to false
4996 * then it means we found our match
4999 return i; /* Return the recipe ID */
5002 return ICE_MAX_NUM_RECIPES;
5006 * ice_prot_type_to_id - get protocol ID from protocol type
5007 * @type: protocol type
5008 * @id: pointer to variable that will receive the ID
5010 * Returns true if found, false otherwise
5012 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
5016 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5017 if (ice_prot_id_tbl[i].type == type) {
5018 *id = ice_prot_id_tbl[i].protocol_id;
5025 * ice_fill_valid_words - count valid words
5026 * @rule: advanced rule with lookup information
5027 * @lkup_exts: byte offset extractions of the words that are valid
5029 * calculate valid words in a lookup rule using mask value
/* NOTE(review): header previously named this "ice_find_valid_words"; fixed
 * to match the actual function name. Extraction also dropped lines here
 * (return type, declarations of prot_id/word/ret_val, word++, return) --
 * verify against upstream ice_switch.c.
 */
5032 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5033 struct ice_prot_lkup_ext *lkup_exts)
5039 if (!ice_prot_type_to_id(rule->type, &prot_id))
5042 word = lkup_exts->n_val_words;
/* Walk the rule's mask union one 16-bit word at a time; every non-zero
 * mask word becomes one extraction entry (offset from ice_prot_ext,
 * hardware protocol ID from ice_prot_id_tbl).
 */
5044 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5045 if (((u16 *)&rule->m_u)[j] &&
5046 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
5047 /* No more space to accommodate */
5048 if (word >= ICE_MAX_CHAIN_WORDS)
5050 lkup_exts->fv_words[word].off =
5051 ice_prot_ext[rule->type].offs[j];
5052 lkup_exts->fv_words[word].prot_id =
5053 ice_prot_id_tbl[rule->type].protocol_id;
5054 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Number of words this rule contributed; update the running total */
5058 ret_val = word - lkup_exts->n_val_words;
5059 lkup_exts->n_val_words = word;
5065 * ice_create_first_fit_recp_def - Create a recipe grouping
5066 * @hw: pointer to the hardware structure
5067 * @lkup_exts: an array of protocol header extractions
5068 * @rg_list: pointer to a list that stores new recipe groups
5069 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5071 * Using first fit algorithm, take all the words that are still not done
5072 * and start grouping them in 4-word groups. Each group makes up one
/* NOTE(review): extraction dropped lines here (the recp_cnt parameter line,
 * opening brace, loop index declaration, group-count increments, and the
 * trailing return) -- verify against upstream ice_switch.c.
 */
5075 static enum ice_status
5076 ice_create_first_fit_recp_def(struct ice_hw *hw,
5077 struct ice_prot_lkup_ext *lkup_exts,
5078 struct LIST_HEAD_TYPE *rg_list,
5081 struct ice_pref_recipe_group *grp = NULL;
5086 /* Walk through every word in the rule to check if it is not done. If so
5087 * then this word needs to be part of a new recipe.
5089 for (j = 0; j < lkup_exts->n_val_words; j++)
5090 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when none exists yet or the current one
 * is full (ICE_NUM_WORDS_RECIPE pairs per group).
 */
5092 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5093 struct ice_recp_grp_entry *entry;
5095 entry = (struct ice_recp_grp_entry *)
5096 ice_malloc(hw, sizeof(*entry));
5098 return ICE_ERR_NO_MEMORY;
5099 LIST_ADD(&entry->l_entry, rg_list);
5100 grp = &entry->r_group;
/* Append this word's (prot_id, off, mask) triple to the group */
5104 grp->pairs[grp->n_val_pairs].prot_id =
5105 lkup_exts->fv_words[j].prot_id;
5106 grp->pairs[grp->n_val_pairs].off =
5107 lkup_exts->fv_words[j].off;
5108 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5116 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5117 * @hw: pointer to the hardware structure
5118 * @fv_list: field vector with the extraction sequence information
5119 * @rg_list: recipe groupings with protocol-offset pairs
5121 * Helper function to fill in the field vector indices for protocol-offset
5122 * pairs. These indexes are then ultimately programmed into a recipe.
/* NOTE(review): extraction dropped lines here (braces, i/j/mask/found
 * declarations, the fv_idx[i] assignment and found handling, the success
 * return) -- verify against upstream ice_switch.c.
 */
5124 static enum ice_status
5125 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5126 struct LIST_HEAD_TYPE *rg_list)
5128 struct ice_sw_fv_list_entry *fv;
5129 struct ice_recp_grp_entry *rg;
5130 struct ice_fv_word *fv_ext;
5132 if (LIST_EMPTY(fv_list))
/* Only the first field vector on the list is consulted here */
5135 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5136 fv_ext = fv->fv_ptr->ew;
5138 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5141 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5142 struct ice_fv_word *pr;
5147 pr = &rg->r_group.pairs[i];
5148 mask = rg->r_group.mask[i];
/* Locate the extraction-word slot matching this pair's
 * hardware protocol ID and byte offset.
 */
5150 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5151 if (fv_ext[j].prot_id == pr->prot_id &&
5152 fv_ext[j].off == pr->off) {
5155 /* Store index of field vector */
5157 /* Mask is given by caller as big
5158 * endian, but sent to FW as little
5161 rg->fv_mask[i] = mask << 8 | mask >> 8;
5165 /* Protocol/offset could not be found, caller gave an
5169 return ICE_ERR_PARAM;
5177 * ice_find_free_recp_res_idx - find free result indexes for recipe
5178 * @hw: pointer to hardware structure
5179 * @profiles: bitmap of profiles that will be associated with the new recipe
5180 * @free_idx: pointer to variable to receive the free index bitmap
5182 * The algorithm used here is:
5183 * 1. When creating a new recipe, create a set P which contains all
5184 * Profiles that will be associated with our new recipe
5186 * 2. For each Profile p in set P:
5187 * a. Add all recipes associated with Profile p into set R
5188 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5189 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5190 * i. Or just assume they all have the same possible indexes:
5192 * i.e., PossibleIndexes = 0x0000F00000000000
5194 * 3. For each Recipe r in set R:
5195 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5196 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5198 * FreeIndexes will contain the bits indicating the indexes free for use,
5199 * then the code needs to update the recipe[r].used_result_idx_bits to
5200 * indicate which indexes were selected for use by this recipe.
/* NOTE(review): extraction dropped lines here (return type, bit/count
 * declarations, bit++ advances in the find-next-bit loops, count++ in the
 * tally loop, and the final return of the free count) -- verify upstream.
 */
5203 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5204 ice_bitmap_t *free_idx)
5206 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5207 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5208 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5212 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5213 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5214 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5215 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "every index possible" and narrow it down per profile */
5217 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5218 ice_set_bit(count, possible_idx);
5220 /* For each profile we are going to associate the recipe with, add the
5221 * recipes that are associated with that profile. This will give us
5222 * the set of recipes that our recipe may collide with. Also, determine
5223 * what possible result indexes are usable given this set of profiles.
5226 while (ICE_MAX_NUM_PROFILES >
5227 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5228 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5229 ICE_MAX_NUM_RECIPES);
5230 ice_and_bitmap(possible_idx, possible_idx,
5231 hw->switch_info->prof_res_bm[bit],
5236 /* For each recipe that our new recipe may collide with, determine
5237 * which indexes have been used.
5239 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5240 if (ice_is_bit_set(recipes, bit)) {
5241 ice_or_bitmap(used_idx, used_idx,
5242 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible here) */
5246 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5248 /* return number of free indexes */
5251 while (ICE_MAX_FV_WORDS >
5252 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5261 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5262 * @hw: pointer to hardware structure
5263 * @rm: recipe management list entry
5264 * @match_tun: if field vector index for tunnel needs to be programmed
5265 * @profiles: bitmap of profiles that will be assocated.
/* NOTE(review): this large function lost many lines in extraction (several
 * declarations such as free_res_idx/chain_idx/recps/i/rid, braces, goto
 * labels and the cleanup/free tail) -- verify against upstream ice_switch.c
 * before building.
 */
5267 static enum ice_status
5268 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5269 bool match_tun, ice_bitmap_t *profiles)
5271 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5272 struct ice_aqc_recipe_data_elem *tmp;
5273 struct ice_aqc_recipe_data_elem *buf;
5274 struct ice_recp_grp_entry *entry;
5275 enum ice_status status;
5281 /* When more than one recipe are required, another recipe is needed to
5282 * chain them together. Matching a tunnel metadata ID takes up one of
5283 * the match fields in the chaining recipe reducing the number of
5284 * chained recipes by one.
5286 /* check number of free result indices */
5287 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5288 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5290 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5291 free_res_idx, rm->n_grp_count);
5293 if (rm->n_grp_count > 1) {
5294 if (rm->n_grp_count > free_res_idx)
5295 return ICE_ERR_MAX_LIMIT;
/* tmp holds the full FW recipe table; buf holds only the recipes we
 * are going to program (one element per group plus the chain recipe).
 */
5300 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5301 ICE_MAX_NUM_RECIPES,
5304 return ICE_ERR_NO_MEMORY;
5306 buf = (struct ice_aqc_recipe_data_elem *)
5307 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5309 status = ICE_ERR_NO_MEMORY;
5313 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5314 recipe_count = ICE_MAX_NUM_RECIPES;
5315 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5317 if (status || recipe_count == 0)
5320 /* Allocate the recipe resources, and configure them according to the
5321 * match fields from protocol headers and extracted field vectors.
5323 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5324 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5327 status = ice_alloc_recipe(hw, &entry->rid);
5331 /* Clear the result index of the located recipe, as this will be
5332 * updated, if needed, later in the recipe creation process.
5334 tmp[0].content.result_indx = 0;
5336 buf[recps] = tmp[0];
5337 buf[recps].recipe_indx = (u8)entry->rid;
5338 /* if the recipe is a non-root recipe RID should be programmed
5339 * as 0 for the rules to be applied correctly.
5341 buf[recps].content.rid = 0;
5342 ice_memset(&buf[recps].content.lkup_indx, 0,
5343 sizeof(buf[recps].content.lkup_indx),
5346 /* All recipes use look-up index 0 to match switch ID. */
5347 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5348 buf[recps].content.mask[0] =
5349 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5350 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5353 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5354 buf[recps].content.lkup_indx[i] = 0x80;
5355 buf[recps].content.mask[i] = 0;
/* Program this group's extracted field-vector indices and masks */
5358 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5359 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5360 buf[recps].content.mask[i + 1] =
5361 CPU_TO_LE16(entry->fv_mask[i]);
5364 if (rm->n_grp_count > 1) {
5365 /* Checks to see if there really is a valid result index
5368 if (chain_idx >= ICE_MAX_FV_WORDS) {
5369 ice_debug(hw, ICE_DBG_SW,
5370 "No chain index available\n");
5371 status = ICE_ERR_MAX_LIMIT;
/* Consume one free result index so the chaining recipe can match
 * this sub-recipe's outcome; advance to the next free index.
 */
5375 entry->chain_idx = chain_idx;
5376 buf[recps].content.result_indx =
5377 ICE_AQ_RECIPE_RESULT_EN |
5378 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5379 ICE_AQ_RECIPE_RESULT_DATA_M);
5380 ice_clear_bit(chain_idx, result_idx_bm);
5381 chain_idx = ice_find_first_bit(result_idx_bm,
5385 /* fill recipe dependencies */
5386 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5387 ICE_MAX_NUM_RECIPES);
5388 ice_set_bit(buf[recps].recipe_indx,
5389 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5390 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root recipe */
5394 if (rm->n_grp_count == 1) {
5395 rm->root_rid = buf[0].recipe_indx;
5396 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5397 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5398 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5399 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5400 sizeof(buf[0].recipe_bitmap),
5401 ICE_NONDMA_TO_NONDMA);
5403 status = ICE_ERR_BAD_PTR;
5406 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5407 * the recipe which is getting created if specified
5408 * by user. Usually any advanced switch filter, which results
5409 * into new extraction sequence, ended up creating a new recipe
5410 * of type ROOT and usually recipes are associated with profiles
5411 * Switch rule referreing newly created recipe, needs to have
5412 * either/or 'fwd' or 'join' priority, otherwise switch rule
5413 * evaluation will not happen correctly. In other words, if
5414 * switch rule to be evaluated on priority basis, then recipe
5415 * needs to have priority, otherwise it will be evaluated last.
5417 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5419 struct ice_recp_grp_entry *last_chain_entry;
5422 /* Allocate the last recipe that will chain the outcomes of the
5423 * other recipes together
5425 status = ice_alloc_recipe(hw, &rid);
5429 buf[recps].recipe_indx = (u8)rid;
5430 buf[recps].content.rid = (u8)rid;
5431 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5432 /* the new entry created should also be part of rg_list to
5433 * make sure we have complete recipe
5435 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5436 sizeof(*last_chain_entry));
5437 if (!last_chain_entry) {
5438 status = ICE_ERR_NO_MEMORY;
5441 last_chain_entry->rid = rid;
5442 ice_memset(&buf[recps].content.lkup_indx, 0,
5443 sizeof(buf[recps].content.lkup_indx),
5445 /* All recipes use look-up index 0 to match switch ID. */
5446 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5447 buf[recps].content.mask[0] =
5448 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5449 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5450 buf[recps].content.lkup_indx[i] =
5451 ICE_AQ_RECIPE_LKUP_IGNORE;
5452 buf[recps].content.mask[i] = 0;
5456 /* update r_bitmap with the recp that is used for chaining */
5457 ice_set_bit(rid, rm->r_bitmap);
5458 /* this is the recipe that chains all the other recipes so it
5459 * should not have a chaining ID to indicate the same
5461 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Wire every sub-recipe's result (chain_idx) into the chaining
 * recipe's lookup words with a full-word match mask.
 */
5462 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5464 last_chain_entry->fv_idx[i] = entry->chain_idx;
5465 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5466 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5467 ice_set_bit(entry->rid, rm->r_bitmap);
5469 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5470 if (sizeof(buf[recps].recipe_bitmap) >=
5471 sizeof(rm->r_bitmap)) {
5472 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5473 sizeof(buf[recps].recipe_bitmap),
5474 ICE_NONDMA_TO_NONDMA);
5476 status = ICE_ERR_BAD_PTR;
5479 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5481 /* To differentiate among different UDP tunnels, a meta data ID
5485 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5486 buf[recps].content.mask[i] =
5487 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5491 rm->root_rid = (u8)rid;
/* Program all prepared recipes into FW under the change lock */
5493 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5497 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5498 ice_release_change_lock(hw);
5502 /* Every recipe that just got created add it to the recipe
5505 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5506 struct ice_switch_info *sw = hw->switch_info;
5507 bool is_root, idx_found = false;
5508 struct ice_sw_recipe *recp;
5509 u16 idx, buf_idx = 0;
5511 /* find buffer index for copying some data */
5512 for (idx = 0; idx < rm->n_grp_count; idx++)
5513 if (buf[idx].recipe_indx == entry->rid) {
5519 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5523 recp = &sw->recp_list[entry->rid];
5524 is_root = (rm->root_rid == entry->rid);
5525 recp->is_root = is_root;
5527 recp->root_rid = entry->rid;
5528 recp->big_recp = (is_root && rm->n_grp_count > 1);
5530 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5531 entry->r_group.n_val_pairs *
5532 sizeof(struct ice_fv_word),
5533 ICE_NONDMA_TO_NONDMA);
5535 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5536 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5538 /* Copy non-result fv index values and masks to recipe. This
5539 * call will also update the result recipe bitmask.
5541 ice_collect_result_idx(&buf[buf_idx], recp);
5543 /* for non-root recipes, also copy to the root, this allows
5544 * easier matching of a complete chained recipe
5547 ice_collect_result_idx(&buf[buf_idx],
5548 &sw->recp_list[rm->root_rid]);
5550 recp->n_ext_words = entry->r_group.n_val_pairs;
5551 recp->chain_idx = entry->chain_idx;
5552 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5553 recp->n_grp_count = rm->n_grp_count;
5554 recp->tun_type = rm->tun_type;
5555 recp->recp_created = true;
5570 * ice_create_recipe_group - creates recipe group
5571 * @hw: pointer to hardware structure
5572 * @rm: recipe management list entry
5573 * @lkup_exts: lookup elements
/* NOTE(review): extraction dropped lines here (recp_count declaration,
 * opening brace, status check around the copy block, and the return) --
 * verify against upstream ice_switch.c.
 */
5575 static enum ice_status
5576 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5577 struct ice_prot_lkup_ext *lkup_exts)
5579 enum ice_status status;
5582 rm->n_grp_count = 0;
5584 /* Create recipes for words that are marked not done by packing them
5587 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5588 &rm->rg_list, &recp_count);
/* Cache the full extraction sequence (words + masks) on the recipe */
5590 rm->n_grp_count += recp_count;
5591 rm->n_ext_words = lkup_exts->n_val_words;
5592 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5593 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5594 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5595 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5602 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5603 * @hw: pointer to hardware structure
5604 * @lkups: lookup elements or match criteria for the advanced recipe, one
5605 * structure per protocol header
5606 * @lkups_cnt: number of protocols
5607 * @bm: bitmap of field vectors to consider
5608 * @fv_list: pointer to a list that holds the returned field vectors
/* NOTE(review): extraction dropped lines here (prot_ids/i declarations,
 * opening brace, NULL check, goto label and the final return) -- verify
 * against upstream ice_switch.c.
 */
5610 static enum ice_status
5611 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5612 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5614 enum ice_status status;
5618 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids))
5620 return ICE_ERR_NO_MEMORY;
/* Translate each software lookup type to its hardware protocol ID;
 * an unmapped type is a configuration error.
 */
5622 for (i = 0; i < lkups_cnt; i++)
5623 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5624 status = ICE_ERR_CFG;
5628 /* Find field vectors that include all specified protocol types */
5629 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5632 ice_free(hw, prot_ids);
5637 * ice_add_special_words - Add words that are not protocols, such as metadata
5638 * @rinfo: other information regarding the rule e.g. priority and action info
5639 * @lkup_exts: lookup word structure
/* NOTE(review): dropped lines — the opening brace, the continuation of the
 * "off" assignment (multiplier operand) and the else/ICE_SUCCESS tail are
 * missing in this listing. */
5641 static enum ice_status
5642 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5643 struct ice_prot_lkup_ext *lkup_exts)
5645 /* If this is a tunneled packet, then add recipe index to match the
5646 * tunnel bit in the packet metadata flags.
5648 if (rinfo->tun_type != ICE_NON_TUN) {
5649 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
/* Consume the next free lookup-word slot for the metadata match. */
5650 u8 word = lkup_exts->n_val_words++;
5652 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5653 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5655 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No free word slots left: the rule cannot also match the tunnel flag. */
5657 return ICE_ERR_MAX_LIMIT;
5664 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5665 * @hw: pointer to hardware structure
5666 * @rinfo: other information regarding the rule e.g. priority and action info
5667 * @bm: pointer to memory for returning the bitmap of field vectors
/* NOTE(review): dropped lines — the return type/`static void` line, the
 * `break;` statements between cases and the closing brace are missing in
 * this listing; the switch maps each tunnel type to a profile class. */
5670 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5673 enum ice_prof_type prof_type;
5675 switch (rinfo->tun_type) {
5677 prof_type = ICE_PROF_NON_TUN;
5679 case ICE_ALL_TUNNELS:
5680 prof_type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel flavors share the UDP-tunnel profile. */
5682 case ICE_SW_TUN_VXLAN_GPE:
5683 case ICE_SW_TUN_GENEVE:
5684 case ICE_SW_TUN_VXLAN:
5685 case ICE_SW_TUN_UDP:
5686 case ICE_SW_TUN_GTP:
5687 prof_type = ICE_PROF_TUN_UDP;
5689 case ICE_SW_TUN_NVGRE:
5690 prof_type = ICE_PROF_TUN_GRE;
5692 case ICE_SW_TUN_PPPOE:
5693 prof_type = ICE_PROF_TUN_PPPOE;
5695 case ICE_SW_TUN_AND_NON_TUN:
5697 prof_type = ICE_PROF_ALL;
/* Convert the chosen profile class into the caller's FV bitmap. */
5701 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5705 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5706 * @hw: pointer to hardware structure
5707 * @lkups: lookup elements or match criteria for the advanced recipe, one
5708 * structure per protocol header
5709 * @lkups_cnt: number of protocols
5710 * @rinfo: other information regarding the rule e.g. priority and action info
5711 * @rid: return the recipe ID of the recipe created
/* NOTE(review): lossy listing — parameter-validation lines, several
 * error-label lines (err_unroll/err_free_lkup_exts tails), loop variable
 * declarations and closing braces are missing; do not treat this block as
 * compilable without the complete source. */
5713 static enum ice_status
5714 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5715 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5717 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5718 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5719 struct ice_prot_lkup_ext *lkup_exts;
5720 struct ice_recp_grp_entry *r_entry;
5721 struct ice_sw_fv_list_entry *fvit;
5722 struct ice_recp_grp_entry *r_tmp;
5723 struct ice_sw_fv_list_entry *tmp;
5724 enum ice_status status = ICE_SUCCESS;
5725 struct ice_sw_recipe *rm;
5726 bool match_tun = false;
5730 return ICE_ERR_PARAM;
5732 lkup_exts = (struct ice_prot_lkup_ext *)
5733 ice_malloc(hw, sizeof(*lkup_exts));
5735 return ICE_ERR_NO_MEMORY;
5737 /* Determine the number of words to be matched and if it exceeds a
5738 * recipe's restrictions
5740 for (i = 0; i < lkups_cnt; i++) {
5743 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5744 status = ICE_ERR_CFG;
5745 goto err_free_lkup_exts;
5748 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5750 status = ICE_ERR_CFG;
5751 goto err_free_lkup_exts;
5755 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5757 status = ICE_ERR_NO_MEMORY;
5758 goto err_free_lkup_exts;
5761 /* Get field vectors that contain fields extracted from all the protocol
5762 * headers being programmed.
5764 INIT_LIST_HEAD(&rm->fv_list);
5765 INIT_LIST_HEAD(&rm->rg_list);
5767 /* Get bitmap of field vectors (profiles) that are compatible with the
5768 * rule request; only these will be searched in the subsequent call to
5771 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5773 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5777 /* Group match words into recipes using preferred recipe grouping
5780 status = ice_create_recipe_group(hw, rm, lkup_exts);
5784 /* There is only profile for UDP tunnels. So, it is necessary to use a
5785 * metadata ID flag to differentiate different tunnel types. A separate
5786 * recipe needs to be used for the metadata.
5788 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5789 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5790 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5793 /* set the recipe priority if specified */
5794 rm->priority = rinfo->priority ? rinfo->priority : 0;
5796 /* Find offsets from the field vector. Pick the first one for all the
5799 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5803 /* get bitmap of all profiles the recipe will be associated with */
5804 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5805 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5807 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5808 ice_set_bit((u16)fvit->profile_id, profiles);
5811 /* Create any special protocol/offset pairs, such as looking at tunnel
5812 * bits by extracting metadata
5814 status = ice_add_special_words(rinfo, lkup_exts);
5816 goto err_free_lkup_exts;
5818 /* Look for a recipe which matches our requested fv / mask list */
5819 *rid = ice_find_recp(hw, lkup_exts);
5820 if (*rid < ICE_MAX_NUM_RECIPES)
5821 /* Success if found a recipe that match the existing criteria */
5824 /* Recipe we need does not exist, add a recipe */
5825 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5829 /* Associate all the recipes created with all the profiles in the
5830 * common field vector.
5832 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5834 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the new recipe bits into the profile's existing recipe map so
 * previously associated recipes are preserved. */
5837 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5838 (u8 *)r_bitmap, NULL);
5842 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5843 ICE_MAX_NUM_RECIPES);
5844 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5848 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5851 ice_release_change_lock(hw);
5856 /* Update profile to recipe bitmap array */
5857 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5858 ICE_MAX_NUM_RECIPES);
5860 /* Update recipe to profile bitmap array */
5861 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5862 if (ice_is_bit_set(r_bitmap, j))
5863 ice_set_bit((u16)fvit->profile_id,
5864 recipe_to_profile[j]);
5867 *rid = rm->root_rid;
5868 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5869 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup path: tear down the temporary grouping and FV lists that were
 * only needed while building the recipe. */
5871 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5872 ice_recp_grp_entry, l_entry) {
5873 LIST_DEL(&r_entry->l_entry);
5874 ice_free(hw, r_entry);
5877 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5879 LIST_DEL(&fvit->list_entry);
5884 ice_free(hw, rm->root_buf);
5889 ice_free(hw, lkup_exts);
5895 * ice_find_dummy_packet - find dummy packet by tunnel type
5897 * @lkups: lookup elements or match criteria for the advanced recipe, one
5898 * structure per protocol header
5899 * @lkups_cnt: number of protocols
5900 * @tun_type: tunnel type from the match criteria
5901 * @pkt: dummy packet to fill according to filter match criteria
5902 * @pkt_len: packet length of dummy packet
5903 * @offsets: pointer to receive the pointer to the offsets for the packet
/* NOTE(review): lossy listing — `static void`, the early `return`s after
 * each tunnel-specific assignment, the flag assignments inside the
 * protocol-scan loop and the if/else braces are missing here. */
5906 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5907 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5909 const struct ice_dummy_pkt_offsets **offsets)
5911 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* Tunnel types with a single fixed template are resolved first, before
 * the per-protocol flags are even computed. */
5914 if (tun_type == ICE_SW_TUN_GTP) {
5915 *pkt = dummy_udp_gtp_packet;
5916 *pkt_len = sizeof(dummy_udp_gtp_packet);
5917 *offsets = dummy_udp_gtp_packet_offsets;
5920 if (tun_type == ICE_SW_TUN_PPPOE) {
5921 *pkt = dummy_pppoe_packet;
5922 *pkt_len = sizeof(dummy_pppoe_packet);
5923 *offsets = dummy_pppoe_packet_offsets;
/* Scan the match criteria once to learn which L3/L4/VLAN headers the
 * caller wants to match; this picks the template variant below. */
5926 for (i = 0; i < lkups_cnt; i++) {
5927 if (lkups[i].type == ICE_UDP_ILOS)
5929 else if (lkups[i].type == ICE_TCP_IL)
5931 else if (lkups[i].type == ICE_IPV6_OFOS)
5933 else if (lkups[i].type == ICE_VLAN_OFOS)
5937 if (tun_type == ICE_ALL_TUNNELS) {
5938 *pkt = dummy_gre_udp_packet;
5939 *pkt_len = sizeof(dummy_gre_udp_packet);
5940 *offsets = dummy_gre_udp_packet_offsets;
5944 if (tun_type == ICE_SW_TUN_NVGRE) {
5946 *pkt = dummy_gre_tcp_packet;
5947 *pkt_len = sizeof(dummy_gre_tcp_packet);
5948 *offsets = dummy_gre_tcp_packet_offsets;
5952 *pkt = dummy_gre_udp_packet;
5953 *pkt_len = sizeof(dummy_gre_udp_packet);
5954 *offsets = dummy_gre_udp_packet_offsets;
5958 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5959 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5961 *pkt = dummy_udp_tun_tcp_packet;
5962 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5963 *offsets = dummy_udp_tun_tcp_packet_offsets;
5967 *pkt = dummy_udp_tun_udp_packet;
5968 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5969 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled selection: choose among UDP/TCP x IPv4/IPv6 x VLAN
 * template combinations based on the flags gathered above. */
5975 *pkt = dummy_vlan_udp_packet;
5976 *pkt_len = sizeof(dummy_vlan_udp_packet);
5977 *offsets = dummy_vlan_udp_packet_offsets;
5980 *pkt = dummy_udp_packet;
5981 *pkt_len = sizeof(dummy_udp_packet);
5982 *offsets = dummy_udp_packet_offsets;
5984 } else if (udp && ipv6) {
5986 *pkt = dummy_vlan_udp_ipv6_packet;
5987 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5988 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5991 *pkt = dummy_udp_ipv6_packet;
5992 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5993 *offsets = dummy_udp_ipv6_packet_offsets;
5995 } else if ((tcp && ipv6) || ipv6) {
5997 *pkt = dummy_vlan_tcp_ipv6_packet;
5998 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
5999 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6002 *pkt = dummy_tcp_ipv6_packet;
6003 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6004 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Default (no UDP/IPv6 match requested): fall back to a TCP/IPv4
 * template, with or without VLAN. */
6009 *pkt = dummy_vlan_tcp_packet;
6010 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6011 *offsets = dummy_vlan_tcp_packet_offsets;
6013 *pkt = dummy_tcp_packet;
6014 *pkt_len = sizeof(dummy_tcp_packet);
6015 *offsets = dummy_tcp_packet_offsets;
6020 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6022 * @lkups: lookup elements or match criteria for the advanced recipe, one
6023 * structure per protocol header
6024 * @lkups_cnt: number of protocols
6025 * @s_rule: stores rule information from the match criteria
6026 * @dummy_pkt: dummy packet to fill according to filter match criteria
6027 * @pkt_len: packet length of dummy packet
6028 * @offsets: offset info for the dummy packet
/* NOTE(review): lossy listing — local declarations (pkt, i), the
 * "offset found" flag handling, the case labels inside the switch and the
 * final return are missing; the visible logic copies the template then
 * merges caller-supplied header words under their masks. */
6030 static enum ice_status
6031 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6032 struct ice_aqc_sw_rules_elem *s_rule,
6033 const u8 *dummy_pkt, u16 pkt_len,
6034 const struct ice_dummy_pkt_offsets *offsets)
6039 /* Start with a packet with a pre-defined/dummy content. Then, fill
6040 * in the header values to be looked up or matched.
6042 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6044 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6046 for (i = 0; i < lkups_cnt; i++) {
6047 enum ice_protocol_type type;
6048 u16 offset = 0, len = 0, j;
6051 /* find the start of this layer; it should be found since this
6052 * was already checked when search for the dummy packet
6054 type = lkups[i].type;
6055 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6056 if (type == offsets[j].type) {
6057 offset = offsets[j].offset;
6062 /* this should never happen in a correct calling sequence */
6064 return ICE_ERR_PARAM;
/* Header length per protocol type; case labels for each protocol are
 * dropped from this listing — only the len assignments remain. */
6066 switch (lkups[i].type) {
6069 len = sizeof(struct ice_ether_hdr);
6072 len = sizeof(struct ice_ethtype_hdr);
6075 len = sizeof(struct ice_vlan_hdr);
6079 len = sizeof(struct ice_ipv4_hdr);
6083 len = sizeof(struct ice_ipv6_hdr);
6088 len = sizeof(struct ice_l4_hdr);
6091 len = sizeof(struct ice_sctp_hdr);
6094 len = sizeof(struct ice_nvgre);
6099 len = sizeof(struct ice_udp_tnl_hdr);
6103 len = sizeof(struct ice_udp_gtp_hdr);
6106 len = sizeof(struct ice_pppoe_hdr);
6109 return ICE_ERR_PARAM;
6112 /* the length should be a word multiple */
6113 if (len % ICE_BYTES_PER_WORD)
6116 /* We have the offset to the header start, the length, the
6117 * caller's header values and mask. Use this information to
6118 * copy the data into the dummy packet appropriately based on
6119 * the mask. Note that we need to only write the bits as
6120 * indicated by the mask to make sure we don't improperly write
6121 * over any significant packet data.
6123 for (j = 0; j < len / sizeof(u16); j++)
6124 if (((u16 *)&lkups[i].m_u)[j])
6125 ((u16 *)(pkt + offset))[j] =
6126 (((u16 *)(pkt + offset))[j] &
6127 ~((u16 *)&lkups[i].m_u)[j]) |
6128 (((u16 *)&lkups[i].h_u)[j] &
6129 ((u16 *)&lkups[i].m_u)[j]);
6132 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6138 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6139 * @hw: pointer to the hardware structure
6140 * @tun_type: tunnel type
6141 * @pkt: dummy packet to fill in
6142 * @offsets: offset info for the dummy packet
/* NOTE(review): lossy listing — the open_port/i declarations, the
 * `switch (tun_type)` line, `break;`s, the error return when no open
 * tunnel port exists, and the success/failure returns are missing. */
6144 static enum ice_status
6145 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6146 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6151 case ICE_SW_TUN_AND_NON_TUN:
6152 case ICE_SW_TUN_VXLAN_GPE:
6153 case ICE_SW_TUN_VXLAN:
6154 case ICE_SW_TUN_UDP:
6155 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6159 case ICE_SW_TUN_GENEVE:
6160 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6165 /* Nothing needs to be done for this tunnel type */
6169 /* Find the outer UDP protocol header and insert the port number */
6170 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6171 if (offsets[i].type == ICE_UDP_OF) {
6172 struct ice_l4_hdr *hdr;
6175 offset = offsets[i].offset;
6176 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel port lives in the outer UDP destination port, big-endian. */
6177 hdr->dst_port = CPU_TO_BE16(open_port);
6187 * ice_find_adv_rule_entry - Search a rule entry
6188 * @hw: pointer to the hardware structure
6189 * @lkups: lookup elements or match criteria for the advanced recipe, one
6190 * structure per protocol header
6191 * @lkups_cnt: number of protocols
6192 * @recp_id: recipe ID for which we are finding the rule
6193 * @rinfo: other information regarding the rule e.g. priority and action info
6195 * Helper function to search for a given advance rule entry
6196 * Returns pointer to entry storing the rule if found
/* NOTE(review): lossy listing — `u16 i;`, `continue;`, the memcmp size
 * argument, the `return list_itr;` on match and the trailing
 * `return NULL;` are missing from this view. */
6198 static struct ice_adv_fltr_mgmt_list_entry *
6199 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6200 u16 lkups_cnt, u8 recp_id,
6201 struct ice_adv_rule_info *rinfo)
6203 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6204 struct ice_switch_info *sw = hw->switch_info;
/* Walk every rule hanging off this recipe; a match requires identical
 * lookup count, identical lookup contents, and matching action metadata. */
6207 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6208 ice_adv_fltr_mgmt_list_entry, list_entry) {
6209 bool lkups_matched = true;
6211 if (lkups_cnt != list_itr->lkups_cnt)
6213 for (i = 0; i < list_itr->lkups_cnt; i++)
6214 if (memcmp(&list_itr->lkups[i], &lkups[i],
6216 lkups_matched = false;
6219 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6220 rinfo->tun_type == list_itr->rule_info.tun_type &&
6228 * ice_adv_add_update_vsi_list
6229 * @hw: pointer to the hardware structure
6230 * @m_entry: pointer to current adv filter management list entry
6231 * @cur_fltr: filter information from the book keeping entry
6232 * @new_fltr: filter information with the new VSI to be added
6234 * Call AQ command to add or update previously created VSI list with new VSI.
6236 * Helper function to do book keeping associated with adding filter information
6237 * The algorithm to do the booking keeping is described below :
6238 * When a VSI needs to subscribe to a given advanced filter
6239 * if only one VSI has been added till now
6240 * Allocate a new VSI list and add two VSIs
6241 * to this list using switch rule command
6242 * Update the previously created switch rule with the
6243 * newly created VSI list ID
6244 * if a VSI list was previously created
6245 * Add the new VSI to the previously created VSI list set
6246 * using the update switch rule command
/* NOTE(review): lossy listing — several status checks, the lkup_type
 * argument to ice_create_vsi_list_rule, error returns and the final
 * return are missing here. */
6248 static enum ice_status
6249 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6250 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6251 struct ice_adv_rule_info *cur_fltr,
6252 struct ice_adv_rule_info *new_fltr)
6254 enum ice_status status;
6255 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions are not eligible for VSI-list fan-out. */
6257 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6258 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6259 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6260 return ICE_ERR_NOT_IMPL;
6262 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6263 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6264 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6265 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6266 return ICE_ERR_NOT_IMPL;
6268 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6269 /* Only one entry existed in the mapping and it was not already
6270 * a part of a VSI list. So, create a VSI list with the old and
6273 struct ice_fltr_info tmp_fltr;
6274 u16 vsi_handle_arr[2];
6276 /* A rule already exists with the new VSI being added */
6277 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6278 new_fltr->sw_act.fwd_id.hw_vsi_id)
6279 return ICE_ERR_ALREADY_EXISTS;
6281 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6282 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6283 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6289 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6290 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6291 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6292 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6293 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6295 /* Update the previous switch rule of "forward to VSI" to
6298 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the transition: the rule now forwards to a VSI list. */
6302 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6303 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6304 m_entry->vsi_list_info =
6305 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6308 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6310 if (!m_entry->vsi_list_info)
6313 /* A rule already exists with the new VSI being added */
6314 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6317 /* Update the previously created VSI list set with
6318 * the new VSI ID passed in
6320 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6322 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6324 ice_aqc_opc_update_sw_rules,
6326 /* update VSI list mapping info with new VSI ID */
6328 ice_set_bit(vsi_handle,
6329 m_entry->vsi_list_info->vsi_map);
6332 m_entry->vsi_count++;
6337 * ice_add_adv_rule - helper function to create an advanced switch rule
6338 * @hw: pointer to the hardware structure
6339 * @lkups: information on the words that needs to be looked up. All words
6340 * together makes one recipe
6341 * @lkups_cnt: num of entries in the lkups array
6342 * @rinfo: other information related to the rule that needs to be programmed
6343 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6344 * ignored is case of error.
6346 * This function can program only 1 rule at a time. The lkups is used to
6347 * describe the all the words that forms the "lookup" portion of the recipe.
6348 * These words can span multiple protocols. Callers to this function need to
6349 * pass in a list of protocol headers with lookup information along and mask
6350 * that determines which words are valid from the given protocol header.
6351 * rinfo describes other information related to this rule such as forwarding
6352 * IDs, priority of this rule, etc.
/* NOTE(review): lossy listing — local declarations (act, q_rgn, word_cnt,
 * i/j), several status checks, case labels, `break;`s and the final
 * return are missing; note rule_buf_sz at 6440 uses the NO_HDR size macro
 * plus pkt_len, and the error path frees both adv_fltr and s_rule. */
6355 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6356 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6357 struct ice_rule_query_data *added_entry)
6359 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6360 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6361 const struct ice_dummy_pkt_offsets *pkt_offsets;
6362 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6363 struct LIST_HEAD_TYPE *rule_head;
6364 struct ice_switch_info *sw;
6365 enum ice_status status;
6366 const u8 *pkt = NULL;
6371 /* Initialize profile to result index bitmap */
6372 if (!hw->switch_info->prof_res_bm_init) {
6373 hw->switch_info->prof_res_bm_init = 1;
6374 ice_init_prof_result_bm(hw);
6378 return ICE_ERR_PARAM;
6380 /* get # of words we need to match */
6382 for (i = 0; i < lkups_cnt; i++) {
6385 ptr = (u16 *)&lkups[i].m_u;
6386 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
/* A rule must match at least one word and no more than the chain limit. */
6390 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6391 return ICE_ERR_PARAM;
6393 /* make sure that we can locate a dummy packet */
6394 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6397 status = ICE_ERR_PARAM;
6398 goto err_ice_add_adv_rule;
6401 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6402 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6403 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6404 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6407 vsi_handle = rinfo->sw_act.vsi_handle;
6408 if (!ice_is_vsi_valid(hw, vsi_handle))
6409 return ICE_ERR_PARAM;
6411 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6412 rinfo->sw_act.fwd_id.hw_vsi_id =
6413 ice_get_hw_vsi_num(hw, vsi_handle);
6414 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6415 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6417 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
/* If an identical rule already exists, just extend its VSI list instead
 * of programming a duplicate switch rule. */
6420 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6422 /* we have to add VSI to VSI_LIST and increment vsi_count.
6423 * Also Update VSI list so that we can change forwarding rule
6424 * if the rule already exists, we will check if it exists with
6425 * same vsi_id, if not then add it to the VSI list if it already
6426 * exists if not then create a VSI list and add the existing VSI
6427 * ID and the new VSI ID to the list
6428 * We will add that VSI to the list
6430 status = ice_adv_add_update_vsi_list(hw, m_entry,
6431 &m_entry->rule_info,
6434 added_entry->rid = rid;
6435 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6436 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6440 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6441 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6443 return ICE_ERR_NO_MEMORY;
6444 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6445 switch (rinfo->sw_act.fltr_act) {
6446 case ICE_FWD_TO_VSI:
6447 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6448 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6449 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6452 act |= ICE_SINGLE_ACT_TO_Q;
6453 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6454 ICE_SINGLE_ACT_Q_INDEX_M;
6456 case ICE_FWD_TO_QGRP:
6457 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6458 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6459 act |= ICE_SINGLE_ACT_TO_Q;
6460 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6461 ICE_SINGLE_ACT_Q_INDEX_M;
6462 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6463 ICE_SINGLE_ACT_Q_REGION_M;
6465 case ICE_DROP_PACKET:
6466 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6467 ICE_SINGLE_ACT_VALID_BIT;
6470 status = ICE_ERR_CFG;
6471 goto err_ice_add_adv_rule;
6474 /* set the rule LOOKUP type based on caller specified 'RX'
6475 * instead of hardcoding it to be either LOOKUP_TX/RX
6477 * for 'RX' set the source to be the port number
6478 * for 'TX' set the source to be the source HW VSI number (determined
6482 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6483 s_rule->pdata.lkup_tx_rx.src =
6484 CPU_TO_LE16(hw->port_info->lport);
6486 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6487 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6490 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6491 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6493 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6494 pkt_len, pkt_offsets);
6496 goto err_ice_add_adv_rule;
6498 if (rinfo->tun_type != ICE_NON_TUN &&
6499 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6500 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6501 s_rule->pdata.lkup_tx_rx.hdr,
6504 goto err_ice_add_adv_rule;
6507 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6508 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6511 goto err_ice_add_adv_rule;
/* Rule is live in HW; build the book-keeping entry that mirrors it. */
6512 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6513 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6515 status = ICE_ERR_NO_MEMORY;
6516 goto err_ice_add_adv_rule;
6519 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6520 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6521 ICE_NONDMA_TO_NONDMA);
6522 if (!adv_fltr->lkups) {
6523 status = ICE_ERR_NO_MEMORY;
6524 goto err_ice_add_adv_rule;
6527 adv_fltr->lkups_cnt = lkups_cnt;
6528 adv_fltr->rule_info = *rinfo;
6529 adv_fltr->rule_info.fltr_rule_id =
6530 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6531 sw = hw->switch_info;
6532 sw->recp_list[rid].adv_rule = true;
6533 rule_head = &sw->recp_list[rid].filt_rules;
6535 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6536 struct ice_fltr_info tmp_fltr;
6538 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6539 tmp_fltr.fltr_rule_id =
6540 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6541 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6542 tmp_fltr.fwd_id.hw_vsi_id =
6543 ice_get_hw_vsi_num(hw, vsi_handle);
6544 tmp_fltr.vsi_handle = vsi_handle;
6545 /* Update the previous switch rule of "forward to VSI" to
6548 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6550 goto err_ice_add_adv_rule;
6551 adv_fltr->vsi_count = 1;
6554 /* Add rule entry to book keeping list */
6555 LIST_ADD(&adv_fltr->list_entry, rule_head);
6557 added_entry->rid = rid;
6558 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6559 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6561 err_ice_add_adv_rule:
6562 if (status && adv_fltr) {
6563 ice_free(hw, adv_fltr->lkups);
6564 ice_free(hw, adv_fltr);
6567 ice_free(hw, s_rule);
6573 * ice_adv_rem_update_vsi_list
6574 * @hw: pointer to the hardware structure
6575 * @vsi_handle: VSI handle of the VSI to remove
6576 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): lossy listing — the vsi_list_id declaration line, some
 * status checks and the final return are missing from this view. */
6579 static enum ice_status
6580 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6581 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6583 struct ice_vsi_list_map_info *vsi_list_info;
6584 enum ice_sw_lkup_type lkup_type;
6585 enum ice_status status;
6588 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6589 fm_list->vsi_count == 0)
6590 return ICE_ERR_PARAM;
6592 /* A rule with the VSI being removed does not exist */
6593 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6594 return ICE_ERR_DOES_NOT_EXIST;
6596 lkup_type = ICE_SW_LKUP_LAST;
6597 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6598 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6599 ice_aqc_opc_update_sw_rules,
6604 fm_list->vsi_count--;
6605 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6606 vsi_list_info = fm_list->vsi_list_info;
/* When only one subscriber remains, collapse the VSI list back into a
 * plain "forward to VSI" rule and free the list. */
6607 if (fm_list->vsi_count == 1) {
6608 struct ice_fltr_info tmp_fltr;
6611 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6613 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6614 return ICE_ERR_OUT_OF_RANGE;
6616 /* Make sure VSI list is empty before removing it below */
6617 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6619 ice_aqc_opc_update_sw_rules,
6624 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6625 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6626 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6627 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6628 tmp_fltr.fwd_id.hw_vsi_id =
6629 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6630 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6631 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6633 /* Update the previous switch rule of "MAC forward to VSI" to
6634 * "MAC fwd to VSI list"
6636 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6638 ice_debug(hw, ICE_DBG_SW,
6639 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6640 tmp_fltr.fwd_id.hw_vsi_id, status);
6644 /* Remove the VSI list since it is no longer used */
6645 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6647 ice_debug(hw, ICE_DBG_SW,
6648 "Failed to remove VSI list %d, error %d\n",
6649 vsi_list_id, status);
6653 LIST_DEL(&vsi_list_info->list_entry);
6654 ice_free(hw, vsi_list_info);
6655 fm_list->vsi_list_info = NULL;
6662 * ice_rem_adv_rule - removes existing advanced switch rule
6663 * @hw: pointer to the hardware structure
6664 * @lkups: information on the words that needs to be looked up. All words
6665 * together makes one recipe
6666 * @lkups_cnt: num of entries in the lkups array
6667 * @rinfo: Its the pointer to the rule information for the rule
6669 * This function can be used to remove 1 rule at a time. The lkups is
6670 * used to describe all the words that forms the "lookup" portion of the
6671 * rule. These words can span multiple protocols. Callers to this function
6672 * need to pass in a list of protocol headers with lookup information along
6673 * and mask that determines which words are valid from the given protocol
6674 * header. rinfo describes other information related to this rule such as
6675 * forwarding IDs, priority of this rule, etc.
/* NOTE(review): lossy listing — the return type line, the `count` checks
 * in the word-fill loop, `remove_rule = true;` assignments, lock-release
 * ordering lines and the final return are missing from this view. */
6678 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6679 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6681 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6682 struct ice_prot_lkup_ext lkup_exts;
6683 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6684 enum ice_status status = ICE_SUCCESS;
6685 bool remove_rule = false;
6686 u16 i, rid, vsi_handle;
/* Rebuild the lookup-word set from the caller's criteria so the owning
 * recipe can be located by content. */
6688 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6689 for (i = 0; i < lkups_cnt; i++) {
6692 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6695 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6700 /* Create any special protocol/offset pairs, such as looking at tunnel
6701 * bits by extracting metadata
6703 status = ice_add_special_words(rinfo, &lkup_exts);
6707 rid = ice_find_recp(hw, &lkup_exts);
6708 /* If did not find a recipe that match the existing criteria */
6709 if (rid == ICE_MAX_NUM_RECIPES)
6710 return ICE_ERR_PARAM;
6712 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6713 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6714 /* the rule is already removed */
6717 ice_acquire_lock(rule_lock);
6718 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6720 } else if (list_elem->vsi_count > 1) {
/* Multiple subscribers: only detach this VSI from the shared list. */
6721 list_elem->vsi_list_info->ref_cnt--;
6722 remove_rule = false;
6723 vsi_handle = rinfo->sw_act.vsi_handle;
6724 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6726 vsi_handle = rinfo->sw_act.vsi_handle;
6727 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6729 ice_release_lock(rule_lock);
6732 if (list_elem->vsi_count == 0)
6735 ice_release_lock(rule_lock);
6737 struct ice_aqc_sw_rules_elem *s_rule;
6740 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6742 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6745 return ICE_ERR_NO_MEMORY;
/* For removal only the rule index matters; action and header are zeroed. */
6746 s_rule->pdata.lkup_tx_rx.act = 0;
6747 s_rule->pdata.lkup_tx_rx.index =
6748 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6749 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6750 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6752 ice_aqc_opc_remove_sw_rules, NULL);
6753 if (status == ICE_SUCCESS) {
6754 ice_acquire_lock(rule_lock);
6755 LIST_DEL(&list_elem->list_entry);
6756 ice_free(hw, list_elem->lkups);
6757 ice_free(hw, list_elem);
6758 ice_release_lock(rule_lock);
6760 ice_free(hw, s_rule);
6766 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6767 * @hw: pointer to the hardware structure
6768 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6770 * This function is used to remove 1 rule at a time. The removal is based on
6771 * the remove_entry parameter. This function will remove rule for a given
6772 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6775 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6776 struct ice_rule_query_data *remove_entry)
6778 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6779 struct LIST_HEAD_TYPE *list_head;
6780 struct ice_adv_rule_info rinfo;
6781 struct ice_switch_info *sw;
6783 sw = hw->switch_info;
/* The recipe must have been created before any of its rules can exist */
6784 if (!sw->recp_list[remove_entry->rid].recp_created)
6785 return ICE_ERR_PARAM;
6786 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
/* Linear scan of the recipe's filter list for the requested rule ID */
6787 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6789 if (list_itr->rule_info.fltr_rule_id ==
6790 remove_entry->rule_id) {
/* Copy the stored rule info but target the caller's VSI handle,
 * then delegate the actual removal to ice_rem_adv_rule().
 */
6791 rinfo = list_itr->rule_info;
6792 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6793 return ice_rem_adv_rule(hw, list_itr->lkups,
6794 list_itr->lkups_cnt, &rinfo);
/* No rule with the given ID exists on this recipe */
6797 return ICE_ERR_PARAM;
6801 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6803 * @hw: pointer to the hardware structure
6804 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6806 * This function is used to remove all the rules for a given VSI and as soon
6807 * as removing a rule fails, it will return immediately with the error code,
6808 * else it will return ICE_SUCCESS
6811 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6813 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6814 struct ice_vsi_list_map_info *map_info;
6815 struct LIST_HEAD_TYPE *list_head;
6816 struct ice_adv_rule_info rinfo;
6817 struct ice_switch_info *sw;
6818 enum ice_status status;
6819 u16 vsi_list_id = 0;
6822 sw = hw->switch_info;
/* Visit every recipe; only created recipes that hold advanced rules are
 * relevant (the skip statements for the two checks are not visible here).
 */
6823 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6824 if (!sw->recp_list[rid].recp_created)
6826 if (!sw->recp_list[rid].adv_rule)
6828 list_head = &sw->recp_list[rid].filt_rules;
6830 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6831 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* NOTE(review): presumably checks whether this rule's VSI list
 * covers vsi_handle -- call arguments truncated here; confirm
 * against full source.
 */
6832 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
/* Remove the matching rule on behalf of the requested VSI */
6837 rinfo = list_itr->rule_info;
6838 rinfo.sw_act.vsi_handle = vsi_handle;
6839 status = ice_rem_adv_rule(hw, list_itr->lkups,
6840 list_itr->lkups_cnt, &rinfo);
6850 * ice_replay_fltr - Replay all the filters stored by a specific list head
6851 * @hw: pointer to the hardware structure
6852 * @list_head: list for which filters needs to be replayed
6853 * @recp_id: Recipe ID for which rules need to be replayed
6855 static enum ice_status
6856 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6858 struct ice_fltr_mgmt_list_entry *itr;
6859 enum ice_status status = ICE_SUCCESS;
6860 struct ice_sw_recipe *recp_list;
6861 u8 lport = hw->port_info->lport;
6862 struct LIST_HEAD_TYPE l_head;
/* Nothing to replay for an empty list */
6864 if (LIST_EMPTY(list_head))
6867 recp_list = &hw->switch_info->recp_list[recp_id];
6868 /* Move entries from the given list_head to a temporary l_head so that
6869 * they can be replayed. Otherwise when trying to re-add the same
6870 * filter, the function will return already exists
6872 LIST_REPLACE_INIT(list_head, &l_head);
6874 /* Mark the given list_head empty by reinitializing it so filters
6875 * could be added again by *handler
6877 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6879 struct ice_fltr_list_entry f_entry;
6881 f_entry.fltr_info = itr->fltr_info;
/* Simple case: single-VSI, non-VLAN filters are re-added as-is */
6882 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6883 status = ice_add_rule_internal(hw, recp_list, lport,
6885 if (status != ICE_SUCCESS)
6890 /* Add a filter per VSI separately */
6895 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6897 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Consume this VSI from the map and re-add the filter as a
 * direct forward-to-VSI rule for it.
 */
6900 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6901 f_entry.fltr_info.vsi_handle = vsi_handle;
6902 f_entry.fltr_info.fwd_id.hw_vsi_id =
6903 ice_get_hw_vsi_num(hw, vsi_handle);
6904 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters go through their dedicated add path */
6905 if (recp_id == ICE_SW_LKUP_VLAN)
6906 status = ice_add_vlan_internal(hw, recp_list,
6909 status = ice_add_rule_internal(hw, recp_list,
6912 if (status != ICE_SUCCESS)
6917 /* Clear the filter management list */
6918 ice_rem_sw_rule_info(hw, &l_head);
6923 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6924 * @hw: pointer to the hardware structure
6926 * NOTE: This function does not clean up partially added filters on error.
6927 * It is up to caller of the function to issue a reset or fail early.
6929 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6931 struct ice_switch_info *sw = hw->switch_info;
6932 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's filter list; bail out on the first failure
 * (per the note above, partial adds are not rolled back here).
 */
6935 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6936 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6938 status = ice_replay_fltr(hw, i, head);
6939 if (status != ICE_SUCCESS)
6946 * ice_replay_vsi_fltr - Replay filters for requested VSI
6947 * @hw: pointer to the hardware structure
6948 * @vsi_handle: driver VSI handle
6949 * @recp_id: Recipe ID for which rules need to be replayed
6950 * @list_head: list for which filters need to be replayed
6952 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6953 * It is required to pass valid VSI handle.
6955 static enum ice_status
6956 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6957 struct LIST_HEAD_TYPE *list_head)
6959 struct ice_fltr_mgmt_list_entry *itr;
6960 enum ice_status status = ICE_SUCCESS;
6961 struct ice_sw_recipe *recp_list;
/* Nothing to replay for an empty list */
6964 if (LIST_EMPTY(list_head))
6966 recp_list = &hw->switch_info->recp_list[recp_id];
6967 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6969 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6971 struct ice_fltr_list_entry f_entry;
6973 f_entry.fltr_info = itr->fltr_info;
/* Simple case: a single-VSI, non-VLAN filter owned by this VSI is
 * re-added directly.
 */
6974 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6975 itr->fltr_info.vsi_handle == vsi_handle) {
6976 /* update the src in case it is VSI num */
6977 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6978 f_entry.fltr_info.src = hw_vsi_id;
6979 status = ice_add_rule_internal(hw, recp_list,
6980 hw->port_info->lport,
6982 if (status != ICE_SUCCESS)
/* Otherwise the filter applies via a VSI list; skip entries whose
 * list does not include this VSI.
 */
6986 if (!itr->vsi_list_info ||
6987 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6989 /* Clearing it so that the logic can add it back */
6990 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6991 f_entry.fltr_info.vsi_handle = vsi_handle;
6992 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6993 /* update the src in case it is VSI num */
6994 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6995 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters go through their dedicated add path */
6996 if (recp_id == ICE_SW_LKUP_VLAN)
6997 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
6999 status = ice_add_rule_internal(hw, recp_list,
7000 hw->port_info->lport,
7002 if (status != ICE_SUCCESS)
7010 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7011 * @hw: pointer to the hardware structure
7012 * @vsi_handle: driver VSI handle
7013 * @list_head: list for which filters need to be replayed
7015 * Replay the advanced rule for the given VSI.
7017 static enum ice_status
7018 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7019 struct LIST_HEAD_TYPE *list_head)
7021 struct ice_rule_query_data added_entry = { 0 };
7022 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7023 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list */
7025 if (LIST_EMPTY(list_head))
7027 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7029 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7030 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules whose stored action targets the requested VSI */
7032 if (vsi_handle != rinfo->sw_act.vsi_handle)
7034 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7043 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7044 * @hw: pointer to the hardware structure
7045 * @vsi_handle: driver VSI handle
7047 * Replays filters for requested VSI via vsi_handle.
7049 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7051 struct ice_switch_info *sw = hw->switch_info;
7052 enum ice_status status;
7055 /* Update the recipes that were created */
7056 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7057 struct LIST_HEAD_TYPE *head;
7059 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes use the advanced replay path; regular
 * recipes use the per-VSI filter replay.
 */
7060 if (!sw->recp_list[i].adv_rule)
7061 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7063 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7064 if (status != ICE_SUCCESS)
7072 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7073 * @hw: pointer to the HW struct
7075 * Deletes the filter replay rules.
7077 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7079 struct ice_switch_info *sw = hw->switch_info;
7085 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7086 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7087 struct LIST_HEAD_TYPE *l_head;
7089 l_head = &sw->recp_list[i].filt_replay_rules;
7090 if (!sw->recp_list[i].adv_rule)
7091 ice_rem_sw_rule_info(hw, l_head);
7093 ice_rem_adv_rule_info(hw, l_head);