1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets into the dummy Ethernet header used when programming switch
 * filter rules, plus related protocol constants.
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* Maximum valid 802.1Q VLAN ID (12-bit field). */
12 #define ICE_MAX_VLAN_ID 0xFFF
/* IPv4 protocol number 47 (0x2F) = GRE, used for NVGRE matching. */
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
/* NOTE(review): this listing is truncated — the initializer below is missing
 * its remaining bytes (SA, 0x81/0x00) and the closing "};". Consult the
 * canonical ice_switch.c before editing.
 */
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Buffer sizes for switch-rule admin-queue elements: the fixed
 * ice_aqc_sw_rules_elem header (with its union payload subtracted) plus the
 * rule-specific payload struct. The trailing "- 1" compensates for the
 * one-byte placeholder already counted inside the payload struct.
 */
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule with (n) action entries. */
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule holding (n) VSI entries. */
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps a protocol header type to its byte offset within a dummy packet.
 * NOTE(review): the closing "};" of this struct is missing from this listing.
 */
56 struct ice_dummy_pkt_offsets {
57 enum ice_protocol_type type;
58 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Offsets + template for MAC + IPv4 + NVGRE + inner MAC/IPv4/TCP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing; consult the canonical ice_switch.c before editing.
 */
61 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
72 static const u8 dummy_gre_tcp_packet[] = {
73 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
74 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x00, 0x00, 0x00,
77 0x08, 0x00, /* ICE_ETYPE_OL 12 */
79 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
80 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
85 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
86 0x00, 0x00, 0x00, 0x00,
88 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
89 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00,
93 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
94 0x00, 0x00, 0x00, 0x00,
95 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
100 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00,
102 0x50, 0x02, 0x20, 0x00,
103 0x00, 0x00, 0x00, 0x00
/* Offsets + template for MAC + IPv4 + NVGRE + inner MAC/IPv4/UDP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
106 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
108 { ICE_ETYPE_OL, 12 },
109 { ICE_IPV4_OFOS, 14 },
113 { ICE_UDP_ILOS, 76 },
114 { ICE_PROTOCOL_LAST, 0 },
117 static const u8 dummy_gre_udp_packet[] = {
118 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
119 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x00, 0x00, 0x00,
122 0x08, 0x00, /* ICE_ETYPE_OL 12 */
124 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
125 0x00, 0x00, 0x00, 0x00,
126 0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
127 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00,
130 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
131 0x00, 0x00, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
134 0x00, 0x00, 0x00, 0x00,
135 0x00, 0x00, 0x00, 0x00,
138 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
139 0x00, 0x00, 0x00, 0x00,
140 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
141 0x00, 0x00, 0x00, 0x00,
142 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
145 0x00, 0x08, 0x00, 0x00,
/* Offsets + template for MAC + IPv4 + UDP (VXLAN port 4789 = 0x12b5) +
 * VXLAN/VXLAN-GPE + inner MAC/IPv4/TCP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
148 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
150 { ICE_ETYPE_OL, 12 },
151 { ICE_IPV4_OFOS, 14 },
155 { ICE_VXLAN_GPE, 42 },
159 { ICE_PROTOCOL_LAST, 0 },
162 static const u8 dummy_udp_tun_tcp_packet[] = {
163 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
164 0x00, 0x00, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
167 0x08, 0x00, /* ICE_ETYPE_OL 12 */
169 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
170 0x00, 0x01, 0x00, 0x00,
171 0x40, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
172 0x00, 0x00, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
176 0x00, 0x46, 0x00, 0x00,
178 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
179 0x00, 0x00, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
182 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00,
186 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
187 0x00, 0x01, 0x00, 0x00,
188 0x40, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
189 0x00, 0x00, 0x00, 0x00,
190 0x00, 0x00, 0x00, 0x00,
192 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
193 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x50, 0x02, 0x20, 0x00,
196 0x00, 0x00, 0x00, 0x00
/* Offsets + template for MAC + IPv4 + UDP + VXLAN + inner MAC/IPv4/UDP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
199 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
201 { ICE_ETYPE_OL, 12 },
202 { ICE_IPV4_OFOS, 14 },
206 { ICE_VXLAN_GPE, 42 },
209 { ICE_UDP_ILOS, 84 },
210 { ICE_PROTOCOL_LAST, 0 },
213 static const u8 dummy_udp_tun_udp_packet[] = {
214 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
215 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00,
218 0x08, 0x00, /* ICE_ETYPE_OL 12 */
220 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
221 0x00, 0x01, 0x00, 0x00,
222 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
223 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
226 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
227 0x00, 0x3a, 0x00, 0x00,
229 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
230 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
233 0x00, 0x00, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00,
237 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
238 0x00, 0x01, 0x00, 0x00,
239 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
244 0x00, 0x08, 0x00, 0x00,
247 /* offset info for MAC + IPv4 + UDP dummy packet */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
248 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
250 { ICE_ETYPE_OL, 12 },
251 { ICE_IPV4_OFOS, 14 },
252 { ICE_UDP_ILOS, 34 },
253 { ICE_PROTOCOL_LAST, 0 },
256 /* Dummy packet for MAC + IPv4 + UDP */
257 static const u8 dummy_udp_packet[] = {
258 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
259 0x00, 0x00, 0x00, 0x00,
260 0x00, 0x00, 0x00, 0x00,
262 0x08, 0x00, /* ICE_ETYPE_OL 12 */
264 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
265 0x00, 0x01, 0x00, 0x00,
266 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
267 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
271 0x00, 0x08, 0x00, 0x00,
273 0x00, 0x00, /* 2 bytes for 4 byte alignment */
276 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
277 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
279 { ICE_ETYPE_OL, 12 },
280 { ICE_VLAN_OFOS, 14 },
281 { ICE_IPV4_OFOS, 18 },
282 { ICE_UDP_ILOS, 38 },
283 { ICE_PROTOCOL_LAST, 0 },
286 /* C-tag (801.1Q), IPv4:UDP dummy packet */
287 static const u8 dummy_vlan_udp_packet[] = {
288 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
289 0x00, 0x00, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00,
292 0x81, 0x00, /* ICE_ETYPE_OL 12 */
294 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
296 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
297 0x00, 0x01, 0x00, 0x00,
298 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
299 0x00, 0x00, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
303 0x00, 0x08, 0x00, 0x00,
305 0x00, 0x00, /* 2 bytes for 4 byte alignment */
308 /* offset info for MAC + IPv4 + TCP dummy packet */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
309 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
311 { ICE_ETYPE_OL, 12 },
312 { ICE_IPV4_OFOS, 14 },
314 { ICE_PROTOCOL_LAST, 0 },
317 /* Dummy packet for MAC + IPv4 + TCP */
318 static const u8 dummy_tcp_packet[] = {
319 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
320 0x00, 0x00, 0x00, 0x00,
321 0x00, 0x00, 0x00, 0x00,
323 0x08, 0x00, /* ICE_ETYPE_OL 12 */
325 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
326 0x00, 0x01, 0x00, 0x00,
327 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
332 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00,
334 0x50, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, /* 2 bytes for 4 byte alignment */
340 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
341 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
343 { ICE_ETYPE_OL, 12 },
344 { ICE_VLAN_OFOS, 14 },
345 { ICE_IPV4_OFOS, 18 },
347 { ICE_PROTOCOL_LAST, 0 },
350 /* C-tag (801.1Q), IPv4:TCP dummy packet */
351 static const u8 dummy_vlan_tcp_packet[] = {
352 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
353 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00,
356 0x81, 0x00, /* ICE_ETYPE_OL 12 */
358 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
360 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
361 0x00, 0x01, 0x00, 0x00,
362 0x00, 0x06, 0x00, 0x00, /* protocol 6 = TCP */
363 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
367 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00,
369 0x50, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets + template for MAC + IPv6 + TCP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing; the "ICE_IPV6_OFOS 40" annotation below disagrees with
 * the offsets table (14) — verify against canonical source.
 */
375 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
377 { ICE_ETYPE_OL, 12 },
378 { ICE_IPV6_OFOS, 14 },
380 { ICE_PROTOCOL_LAST, 0 },
383 static const u8 dummy_tcp_ipv6_packet[] = {
384 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
385 0x00, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00,
388 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
390 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
391 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
402 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00,
404 0x50, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, /* 2 bytes for 4 byte alignment */
410 /* C-tag (802.1Q): IPv6 + TCP */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
411 static const struct ice_dummy_pkt_offsets
412 dummy_vlan_tcp_ipv6_packet_offsets[] = {
414 { ICE_ETYPE_OL, 12 },
415 { ICE_VLAN_OFOS, 14 },
416 { ICE_IPV6_OFOS, 18 },
418 { ICE_PROTOCOL_LAST, 0 },
421 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
422 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
423 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
427 0x81, 0x00, /* ICE_ETYPE_OL 12 */
429 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
431 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
432 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
445 0x50, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
448 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets + template for MAC + IPv6 + UDP.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing; the "ICE_IPV6_OFOS 40" annotation below disagrees with
 * the offsets table (14) — verify against canonical source.
 */
452 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
454 { ICE_ETYPE_OL, 12 },
455 { ICE_IPV6_OFOS, 14 },
456 { ICE_UDP_ILOS, 54 },
457 { ICE_PROTOCOL_LAST, 0 },
460 /* IPv6 + UDP dummy packet */
461 static const u8 dummy_udp_ipv6_packet[] = {
462 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
463 0x00, 0x00, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00,
466 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
468 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
469 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
480 0x00, 0x08, 0x00, 0x00,
482 0x00, 0x00, /* 2 bytes for 4 byte alignment */
485 /* C-tag (802.1Q): IPv6 + UDP */
/* NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
486 static const struct ice_dummy_pkt_offsets
487 dummy_vlan_udp_ipv6_packet_offsets[] = {
489 { ICE_ETYPE_OL, 12 },
490 { ICE_VLAN_OFOS, 14 },
491 { ICE_IPV6_OFOS, 18 },
492 { ICE_UDP_ILOS, 58 },
493 { ICE_PROTOCOL_LAST, 0 },
496 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
497 static const u8 dummy_vlan_udp_ipv6_packet[] = {
498 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
499 0x00, 0x00, 0x00, 0x00,
500 0x00, 0x00, 0x00, 0x00,
502 0x81, 0x00, /* ICE_ETYPE_OL 12 */
504 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
506 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
507 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
518 0x00, 0x08, 0x00, 0x00,
520 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* Offsets + template for MAC + IPv4 + UDP (GTP-U port 2152 = 0x0868) + GTP-U
 * header with PDU session container extension.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
523 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
525 { ICE_IPV4_OFOS, 14 },
528 { ICE_PROTOCOL_LAST, 0 },
531 static const u8 dummy_udp_gtp_packet[] = {
532 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
533 0x00, 0x00, 0x00, 0x00,
534 0x00, 0x00, 0x00, 0x00,
537 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
538 0x00, 0x00, 0x00, 0x00,
539 0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
540 0x00, 0x00, 0x00, 0x00,
541 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
544 0x00, 0x1c, 0x00, 0x00,
546 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x00, 0x00, 0x85,
550 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
551 0x00, 0x00, 0x00, 0x00,
/* Offsets + template for MAC + VLAN + PPPoE session (EtherType 0x8864) +
 * PPP (0x0021 = IPv4) + inner IPv4.
 * NOTE(review): entries and closing "};" lines are missing from this
 * truncated listing.
 */
554 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
556 { ICE_ETYPE_OL, 12 },
557 { ICE_VLAN_OFOS, 14},
559 { ICE_PROTOCOL_LAST, 0 },
562 static const u8 dummy_pppoe_packet[] = {
563 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
564 0x00, 0x00, 0x00, 0x00,
565 0x00, 0x00, 0x00, 0x00,
567 0x81, 0x00, /* ICE_ETYPE_OL 12 */
569 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
571 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
574 0x00, 0x21, /* PPP Link Layer 24 */
576 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
585 /* this is a recipe to profile association bitmap */
/* recipe_to_profile[rid] bit p set => recipe rid is used by profile p. */
586 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
587 ICE_MAX_NUM_PROFILES);
589 /* this is a profile to recipe association bitmap */
/* profile_to_recipe[p] bit rid set => profile p maps to recipe rid.
 * Both tables are refreshed together by ice_get_recp_to_prof_map().
 */
590 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
591 ICE_MAX_NUM_RECIPES);
/* Forward declaration; defined later in this file. */
593 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
596 * ice_collect_result_idx - copy result index values
597 * @buf: buffer that contains the result index
598 * @recp: the recipe struct to copy data into
600 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
601 struct ice_sw_recipe *recp)
/* If the result-index-valid flag is set, record the index (with the flag bit
 * masked off) in the recipe's res_idxs bitmap.
 * NOTE(review): the function's brace lines are missing from this listing.
 */
603 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
604 ice_set_bit(buf->content.result_indx &
605 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
609 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
610 * @hw: pointer to hardware structure
611 * @recps: struct that we need to populate
612 * @rid: recipe ID that we are populating
613 * @refresh_required: true if we should get recipe to profile mapping from FW
615 * This function is used to populate all the necessary entries into our
616 * bookkeeping so that we have a current list of all the recipes that are
617 * programmed in the firmware.
/* NOTE(review): this listing is heavily truncated — braces, several local
 * declarations (e.g. fv_word_idx, sub_recps, off, is_root), error-path labels
 * and some statements are missing. Do not modify without the canonical
 * ice_switch.c at hand.
 */
619 static enum ice_status
620 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
621 bool *refresh_required)
623 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
624 struct ice_aqc_recipe_data_elem *tmp;
625 u16 num_recps = ICE_MAX_NUM_RECIPES;
626 struct ice_prot_lkup_ext *lkup_exts;
627 enum ice_status status;
631 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
633 /* we need a buffer big enough to accommodate all the recipes */
634 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
635 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
637 return ICE_ERR_NO_MEMORY;
/* Ask FW for the recipe chain rooted at rid; num_recps returns the count. */
639 tmp[0].recipe_indx = rid;
640 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
641 /* non-zero status meaning recipe doesn't exist */
645 /* Get recipe to profile map so that we can get the fv from lkups that
646 * we read for a recipe from FW. Since we want to minimize the number of
647 * times we make this FW call, just make one call and cache the copy
648 * until a new recipe is added. This operation is only required the
649 * first time to get the changes from FW. Then to search existing
650 * entries we don't need to update the cache again until another recipe
653 if (*refresh_required) {
654 ice_get_recp_to_prof_map(hw);
655 *refresh_required = false;
658 /* Start populating all the entries for recps[rid] based on lkups from
659 * firmware. Note that we are only creating the root recipe in our
662 lkup_exts = &recps[rid].lkup_exts;
/* Walk every sub-recipe element returned by FW. */
664 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
665 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
666 struct ice_recp_grp_entry *rg_entry;
667 u8 i, prof, idx, prot = 0;
671 rg_entry = (struct ice_recp_grp_entry *)
672 ice_malloc(hw, sizeof(*rg_entry));
674 status = ICE_ERR_NO_MEMORY;
678 idx = root_bufs.recipe_indx;
679 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
681 /* Mark all result indices in this chain */
682 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
683 ice_set_bit(root_bufs.content.result_indx &
684 ~ICE_AQ_RECIPE_RESULT_EN, result_bm)
686 /* get the first profile that is associated with rid */
687 prof = ice_find_first_bit(recipe_to_profile[idx],
688 ICE_MAX_NUM_PROFILES);
689 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
690 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
692 rg_entry->fv_idx[i] = lkup_indx;
693 rg_entry->fv_mask[i] =
694 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
696 /* If the recipe is a chained recipe then all its
697 * child recipe's result will have a result index.
698 * To fill fv_words we should not use those result
699 * index, we only need the protocol ids and offsets.
700 * We will skip all the fv_idx which stores result
701 * index in them. We also need to skip any fv_idx which
702 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
703 * valid offset value.
705 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
706 rg_entry->fv_idx[i]) ||
707 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
708 rg_entry->fv_idx[i] == 0)
/* Translate the field-vector index into (protocol id, offset). */
711 ice_find_prot_off(hw, ICE_BLK_SW, prof,
712 rg_entry->fv_idx[i], &prot, &off);
713 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
714 lkup_exts->fv_words[fv_word_idx].off = off;
717 /* populate rg_list with the data from the child entry of this
720 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
722 /* Propagate some data to the recipe database */
723 recps[idx].is_root = !!is_root;
724 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
725 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
726 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
727 recps[idx].chain_idx = root_bufs.content.result_indx &
728 ~ICE_AQ_RECIPE_RESULT_EN;
729 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
731 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
737 /* Only do the following for root recipes entries */
738 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
739 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
740 recps[idx].root_rid = root_bufs.content.rid &
741 ~ICE_AQ_RECIPE_ID_IS_ROOT;
742 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
745 /* Complete initialization of the root recipe entry */
746 lkup_exts->n_val_words = fv_word_idx;
747 recps[rid].big_recp = (num_recps > 1);
748 recps[rid].n_grp_count = (u8)num_recps;
/* Keep a private copy of the raw FW buffer for later replay/compare. */
749 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
750 ice_memdup(hw, tmp, recps[rid].n_grp_count *
751 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
752 if (!recps[rid].root_buf)
755 /* Copy result indexes */
756 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
757 recps[rid].recp_created = true;
765 * ice_get_recp_to_prof_map - updates recipe to profile mapping
766 * @hw: pointer to hardware structure
768 * This function is used to populate recipe_to_profile matrix where index to
769 * this array is the recipe ID and the element is the mapping of which profiles
770 * is this recipe mapped to.
/* NOTE(review): the "static void" line, braces, loop-variable declarations
 * and the continue on AQ failure are missing from this truncated listing.
 */
773 ice_get_recp_to_prof_map(struct ice_hw *hw)
775 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
778 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
781 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
782 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW for the recipes associated with profile i. */
783 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
785 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
786 ICE_MAX_NUM_RECIPES);
/* Mirror the mapping into the inverse recipe_to_profile table. */
787 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
788 if (ice_is_bit_set(r_bitmap, j))
789 ice_set_bit(i, recipe_to_profile[j]);
794 * ice_init_def_sw_recp - initialize the recipe book keeping tables
795 * @hw: pointer to the HW struct
796 * @recp_list: pointer to sw recipe list
798 * Allocate memory for the entire recipe table and initialize the structures/
799 * entries corresponding to basic recipes.
/* NOTE(review): return-type line, braces, the NULL check after ice_calloc and
 * the final "*recp_list = recps" assignment are missing from this listing.
 */
802 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
804 struct ice_sw_recipe *recps;
807 recps = (struct ice_sw_recipe *)
808 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
810 return ICE_ERR_NO_MEMORY;
/* Default every recipe slot: root_rid = own index, empty rule lists. */
812 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
813 recps[i].root_rid = i;
814 INIT_LIST_HEAD(&recps[i].filt_rules);
815 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
816 INIT_LIST_HEAD(&recps[i].rg_list);
817 ice_init_lock(&recps[i].filt_rule_lock);
826 * ice_aq_get_sw_cfg - get switch configuration
827 * @hw: pointer to the hardware structure
828 * @buf: pointer to the result buffer
829 * @buf_size: length of the buffer available for response
830 * @req_desc: pointer to requested descriptor
831 * @num_elems: pointer to number of elements
832 * @cd: pointer to command details structure or NULL
834 * Get switch configuration (0x0200) to be placed in 'buff'.
835 * This admin command returns information such as initial VSI/port number
836 * and switch ID it belongs to.
838 * NOTE: *req_desc is both an input/output parameter.
839 * The caller of this function first calls this function with *request_desc set
840 * to 0. If the response from f/w has *req_desc set to 0, all the switch
841 * configuration information has been returned; if non-zero (meaning not all
842 * the information was returned), the caller should call this function again
843 * with *req_desc set to the previous value returned by f/w to get the
844 * next block of switch configuration information.
846 * *num_elems is output only parameter. This reflects the number of elements
847 * in response buffer. The caller of this function to use *num_elems while
848 * parsing the response buffer.
/* NOTE(review): braces, the "if (!status)" guard around the out-parameter
 * updates, and the final "return status;" are missing from this listing.
 */
850 static enum ice_status
851 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
852 u16 buf_size, u16 *req_desc, u16 *num_elems,
853 struct ice_sq_cd *cd)
855 struct ice_aqc_get_sw_cfg *cmd;
856 enum ice_status status;
857 struct ice_aq_desc desc;
859 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
860 cmd = &desc.params.get_sw_conf;
861 cmd->element = CPU_TO_LE16(*req_desc);
863 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* FW echoes the continuation token and element count back in the desc. */
865 *req_desc = LE16_TO_CPU(cmd->element);
866 *num_elems = LE16_TO_CPU(cmd->num_elems);
873 * ice_alloc_sw - allocate resources specific to switch
874 * @hw: pointer to the HW struct
875 * @ena_stats: true to turn on VEB stats
876 * @shared_res: true for shared resource, false for dedicated resource
877 * @sw_id: switch ID returned
878 * @counter_id: VEB counter ID returned
880 * allocates switch resources (SWID and VEB counter) (0x0208)
/* NOTE(review): return-type line, braces, NULL checks after ice_malloc, the
 * "if (!ena_stats)" branch selector and the function tail are missing from
 * this truncated listing.
 */
883 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
886 struct ice_aqc_alloc_free_res_elem *sw_buf;
887 struct ice_aqc_res_elem *sw_ele;
888 enum ice_status status;
891 buf_len = sizeof(*sw_buf);
892 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
893 ice_malloc(hw, buf_len);
895 return ICE_ERR_NO_MEMORY;
897 /* Prepare buffer for switch ID.
898 * The number of resource entries in buffer is passed as 1 since only a
899 * single switch/VEB instance is allocated, and hence a single sw_id
902 sw_buf->num_elems = CPU_TO_LE16(1);
904 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
905 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
906 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
908 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
909 ice_aqc_opc_alloc_res, NULL);
912 goto ice_alloc_sw_exit;
/* First element of the response carries the allocated SWID. */
914 sw_ele = &sw_buf->elem[0];
915 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
918 /* Prepare buffer for VEB Counter */
919 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
920 struct ice_aqc_alloc_free_res_elem *counter_buf;
921 struct ice_aqc_res_elem *counter_ele;
923 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
924 ice_malloc(hw, buf_len);
926 status = ICE_ERR_NO_MEMORY;
927 goto ice_alloc_sw_exit;
930 /* The number of resource entries in buffer is passed as 1 since
931 * only a single switch/VEB instance is allocated, and hence a
932 * single VEB counter is requested.
934 counter_buf->num_elems = CPU_TO_LE16(1);
935 counter_buf->res_type =
936 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
937 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
938 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* On failure the counter buffer is released before bailing out. */
942 ice_free(hw, counter_buf);
943 goto ice_alloc_sw_exit;
945 counter_ele = &counter_buf->elem[0];
946 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
947 ice_free(hw, counter_buf);
951 ice_free(hw, sw_buf);
956 * ice_free_sw - free resources specific to switch
957 * @hw: pointer to the HW struct
958 * @sw_id: switch ID returned
959 * @counter_id: VEB counter ID returned
961 * free switch resources (SWID and VEB counter) (0x0209)
963 * NOTE: This function frees multiple resources. It continues
964 * releasing other resources even after it encounters error.
965 * The error code returned is the last error it encountered.
/* NOTE(review): braces, NULL checks after ice_malloc and the final
 * "return ret_status;" are missing from this truncated listing.
 */
967 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
969 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
970 enum ice_status status, ret_status;
973 buf_len = sizeof(*sw_buf);
974 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
975 ice_malloc(hw, buf_len);
977 return ICE_ERR_NO_MEMORY;
979 /* Prepare buffer to free for switch ID res.
980 * The number of resource entries in buffer is passed as 1 since only a
981 * single switch/VEB instance is freed, and hence a single sw_id
984 sw_buf->num_elems = CPU_TO_LE16(1);
985 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
986 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
988 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
989 ice_aqc_opc_free_res, NULL);
992 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
994 /* Prepare buffer to free for VEB Counter resource */
995 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
996 ice_malloc(hw, buf_len);
998 ice_free(hw, sw_buf);
999 return ICE_ERR_NO_MEMORY;
1002 /* The number of resource entries in buffer is passed as 1 since only a
1003 * single switch/VEB instance is freed, and hence a single VEB counter
1006 counter_buf->num_elems = CPU_TO_LE16(1);
1007 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1008 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1010 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1011 ice_aqc_opc_free_res, NULL);
1013 ice_debug(hw, ICE_DBG_SW,
1014 "VEB counter resource could not be freed\n");
/* Remember the last failure; earlier SWID-free result may be overwritten. */
1015 ret_status = status;
1018 ice_free(hw, counter_buf);
1019 ice_free(hw, sw_buf);
/* ice_aq_add_vsi — issue the Add VSI admin command (0x0210).
 * NOTE(review): the comment header's first lines, the return-type line,
 * braces and the "if (!status)" guard around the response copy-out are
 * missing from this truncated listing.
 */
1025 * @hw: pointer to the HW struct
1026 * @vsi_ctx: pointer to a VSI context struct
1027 * @cd: pointer to command details structure or NULL
1029 * Add a VSI context to the hardware (0x0210)
1032 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1033 struct ice_sq_cd *cd)
1035 struct ice_aqc_add_update_free_vsi_resp *res;
1036 struct ice_aqc_add_get_update_free_vsi *cmd;
1037 struct ice_aq_desc desc;
1038 enum ice_status status;
1040 cmd = &desc.params.vsi_cmd;
1041 res = &desc.params.add_update_free_vsi_res;
1043 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* When the caller supplies a VSI number, mark it valid in the command. */
1045 if (!vsi_ctx->alloc_from_pool)
1046 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1047 ICE_AQ_VSI_IS_VALID);
1049 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1051 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1053 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1054 sizeof(vsi_ctx->info), cd);
/* Copy FW-assigned VSI number and pool accounting back to the caller. */
1057 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1058 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1059 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi — issue the Free VSI admin command (0x0213).
 * NOTE(review): the comment header's first lines, the return-type line,
 * braces, the "if (keep_vsi_alloc)" guard before setting cmd_flags and the
 * "if (!status)" guard around the copy-out are missing from this listing.
 */
1067 * @hw: pointer to the HW struct
1068 * @vsi_ctx: pointer to a VSI context struct
1069 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1070 * @cd: pointer to command details structure or NULL
1072 * Free VSI context info from hardware (0x0213)
1075 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1076 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1078 struct ice_aqc_add_update_free_vsi_resp *resp;
1079 struct ice_aqc_add_get_update_free_vsi *cmd;
1080 struct ice_aq_desc desc;
1081 enum ice_status status;
1083 cmd = &desc.params.vsi_cmd;
1084 resp = &desc.params.add_update_free_vsi_res;
1086 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1088 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1090 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1092 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1094 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1095 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi — issue the Update VSI admin command (0x0211).
 * NOTE(review): the comment header's first lines, the return-type line,
 * braces and the "if (!status)" guard around the copy-out are missing from
 * this truncated listing.
 */
1103 * @hw: pointer to the HW struct
1104 * @vsi_ctx: pointer to a VSI context struct
1105 * @cd: pointer to command details structure or NULL
1107 * Update VSI context in the hardware (0x0211)
1110 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1111 struct ice_sq_cd *cd)
1113 struct ice_aqc_add_update_free_vsi_resp *resp;
1114 struct ice_aqc_add_get_update_free_vsi *cmd;
1115 struct ice_aq_desc desc;
1116 enum ice_status status;
1118 cmd = &desc.params.vsi_cmd;
1119 resp = &desc.params.add_update_free_vsi_res;
1121 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1123 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1125 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1127 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1128 sizeof(vsi_ctx->info), cd);
1131 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1132 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1139 * ice_is_vsi_valid - check whether the VSI is valid or not
1140 * @hw: pointer to the HW struct
1141 * @vsi_handle: VSI handle
1143 * check whether the VSI is valid or not
1145 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1147 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1151 * ice_get_hw_vsi_num - return the HW VSI number
1152 * @hw: pointer to the HW struct
1153 * @vsi_handle: VSI handle
1155 * return the HW VSI number
1156 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1158 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1160 return hw->vsi_ctx[vsi_handle]->vsi_num;
1164 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1165 * @hw: pointer to the HW struct
1166 * @vsi_handle: VSI handle
1168 * return the VSI context entry for a given VSI handle
1170 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1172 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1176 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1177 * @hw: pointer to the HW struct
1178 * @vsi_handle: VSI handle
1179 * @vsi: VSI context pointer
1181 * save the VSI context entry for a given VSI handle
1184 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1186 hw->vsi_ctx[vsi_handle] = vsi;
1190 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1191 * @hw: pointer to the HW struct
1192 * @vsi_handle: VSI handle
1194 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1196 struct ice_vsi_ctx *vsi;
1199 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1202 ice_for_each_traffic_class(i) {
1203 if (vsi->lan_q_ctx[i]) {
1204 ice_free(hw, vsi->lan_q_ctx[i]);
1205 vsi->lan_q_ctx[i] = NULL;
1211 * ice_clear_vsi_ctx - clear the VSI context entry
1212 * @hw: pointer to the HW struct
1213 * @vsi_handle: VSI handle
1215 * clear the VSI context entry
1217 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1219 struct ice_vsi_ctx *vsi;
1221 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1223 ice_clear_vsi_q_ctx(hw, vsi_handle);
1225 hw->vsi_ctx[vsi_handle] = NULL;
1230 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1231 * @hw: pointer to the HW struct
1233 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1237 for (i = 0; i < ICE_MAX_VSI; i++)
1238 ice_clear_vsi_ctx(hw, i);
1242 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1243 * @hw: pointer to the HW struct
1244 * @vsi_handle: unique VSI handle provided by drivers
1245 * @vsi_ctx: pointer to a VSI context struct
1246 * @cd: pointer to command details structure or NULL
1248 * Add a VSI context to the hardware also add it into the VSI handle list.
1249 * If this function gets called after reset for existing VSIs then update
1250 * with the new HW VSI number in the corresponding VSI handle list entry.
1253 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1254 struct ice_sq_cd *cd)
1256 struct ice_vsi_ctx *tmp_vsi_ctx;
1257 enum ice_status status;
1259 if (vsi_handle >= ICE_MAX_VSI)
1260 return ICE_ERR_PARAM;
1261 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1264 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1266 /* Create a new VSI context */
1267 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1268 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1270 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1271 return ICE_ERR_NO_MEMORY;
1273 *tmp_vsi_ctx = *vsi_ctx;
1275 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1277 /* update with new HW VSI num */
1278 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1285 * ice_free_vsi- free VSI context from hardware and VSI handle list
1286 * @hw: pointer to the HW struct
1287 * @vsi_handle: unique VSI handle
1288 * @vsi_ctx: pointer to a VSI context struct
1289 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1290 * @cd: pointer to command details structure or NULL
1292 * Free VSI context info from hardware as well as from VSI handle list
1295 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1296 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1298 enum ice_status status;
1300 if (!ice_is_vsi_valid(hw, vsi_handle))
1301 return ICE_ERR_PARAM;
1302 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1303 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1305 ice_clear_vsi_ctx(hw, vsi_handle);
1311 * @hw: pointer to the HW struct
1312 * @vsi_handle: unique VSI handle
1313 * @vsi_ctx: pointer to a VSI context struct
1314 * @cd: pointer to command details structure or NULL
1316 * Update VSI context in the hardware
1319 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1320 struct ice_sq_cd *cd)
1322 if (!ice_is_vsi_valid(hw, vsi_handle))
1323 return ICE_ERR_PARAM;
1324 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1325 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1329 * ice_aq_get_vsi_params
1330 * @hw: pointer to the HW struct
1331 * @vsi_ctx: pointer to a VSI context struct
1332 * @cd: pointer to command details structure or NULL
1334 * Get VSI context info from hardware (0x0212)
1337 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1338 struct ice_sq_cd *cd)
1340 struct ice_aqc_add_get_update_free_vsi *cmd;
1341 struct ice_aqc_get_vsi_resp *resp;
1342 struct ice_aq_desc desc;
1343 enum ice_status status;
1345 cmd = &desc.params.vsi_cmd;
1346 resp = &desc.params.get_vsi_resp;
1348 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1350 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1352 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1353 sizeof(vsi_ctx->info), cd);
1355 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1357 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1358 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1365 * ice_aq_add_update_mir_rule - add/update a mirror rule
1366 * @hw: pointer to the HW struct
1367 * @rule_type: Rule Type
1368 * @dest_vsi: VSI number to which packets will be mirrored
1369 * @count: length of the list
1370 * @mr_buf: buffer for list of mirrored VSI numbers
1371 * @cd: pointer to command details structure or NULL
1374 * Add/Update Mirror Rule (0x260).
1377 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1378 u16 count, struct ice_mir_rule_buf *mr_buf,
1379 struct ice_sq_cd *cd, u16 *rule_id)
1381 struct ice_aqc_add_update_mir_rule *cmd;
1382 struct ice_aq_desc desc;
1383 enum ice_status status;
1384 __le16 *mr_list = NULL;
1387 switch (rule_type) {
1388 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1389 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1390 /* Make sure count and mr_buf are set for these rule_types */
1391 if (!(count && mr_buf))
1392 return ICE_ERR_PARAM;
1394 buf_size = count * sizeof(__le16);
1395 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1397 return ICE_ERR_NO_MEMORY;
1399 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1400 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1401 /* Make sure count and mr_buf are not set for these
1404 if (count || mr_buf)
1405 return ICE_ERR_PARAM;
1408 ice_debug(hw, ICE_DBG_SW,
1409 "Error due to unsupported rule_type %u\n", rule_type);
1410 return ICE_ERR_OUT_OF_RANGE;
1413 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1415 /* Pre-process 'mr_buf' items for add/update of virtual port
1416 * ingress/egress mirroring (but not physical port ingress/egress
1422 for (i = 0; i < count; i++) {
1425 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1427 /* Validate specified VSI number, make sure it is less
1428 * than ICE_MAX_VSI, if not return with error.
1430 if (id >= ICE_MAX_VSI) {
1431 ice_debug(hw, ICE_DBG_SW,
1432 "Error VSI index (%u) out-of-range\n",
1434 ice_free(hw, mr_list);
1435 return ICE_ERR_OUT_OF_RANGE;
1438 /* add VSI to mirror rule */
1441 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1442 else /* remove VSI from mirror rule */
1443 mr_list[i] = CPU_TO_LE16(id);
1447 cmd = &desc.params.add_update_rule;
1448 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1449 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1450 ICE_AQC_RULE_ID_VALID_M);
1451 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1452 cmd->num_entries = CPU_TO_LE16(count);
1453 cmd->dest = CPU_TO_LE16(dest_vsi);
1455 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1457 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1459 ice_free(hw, mr_list);
1465 * ice_aq_delete_mir_rule - delete a mirror rule
1466 * @hw: pointer to the HW struct
1467 * @rule_id: Mirror rule ID (to be deleted)
1468 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1469 * otherwise it is returned to the shared pool
1470 * @cd: pointer to command details structure or NULL
1472 * Delete Mirror Rule (0x261).
1475 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1476 struct ice_sq_cd *cd)
1478 struct ice_aqc_delete_mir_rule *cmd;
1479 struct ice_aq_desc desc;
1481 /* rule_id should be in the range 0...63 */
1482 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1483 return ICE_ERR_OUT_OF_RANGE;
1485 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1487 cmd = &desc.params.del_rule;
1488 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1489 cmd->rule_id = CPU_TO_LE16(rule_id);
1492 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1494 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1498 * ice_aq_alloc_free_vsi_list
1499 * @hw: pointer to the HW struct
1500 * @vsi_list_id: VSI list ID returned or used for lookup
1501 * @lkup_type: switch rule filter lookup type
1502 * @opc: switch rules population command type - pass in the command opcode
1504 * allocates or free a VSI list resource
1506 static enum ice_status
1507 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1508 enum ice_sw_lkup_type lkup_type,
1509 enum ice_adminq_opc opc)
1511 struct ice_aqc_alloc_free_res_elem *sw_buf;
1512 struct ice_aqc_res_elem *vsi_ele;
1513 enum ice_status status;
1516 buf_len = sizeof(*sw_buf);
1517 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1518 ice_malloc(hw, buf_len);
1520 return ICE_ERR_NO_MEMORY;
1521 sw_buf->num_elems = CPU_TO_LE16(1);
1523 if (lkup_type == ICE_SW_LKUP_MAC ||
1524 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1525 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1526 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1527 lkup_type == ICE_SW_LKUP_PROMISC ||
1528 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1529 lkup_type == ICE_SW_LKUP_LAST) {
1530 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1531 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1533 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1535 status = ICE_ERR_PARAM;
1536 goto ice_aq_alloc_free_vsi_list_exit;
1539 if (opc == ice_aqc_opc_free_res)
1540 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1542 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1544 goto ice_aq_alloc_free_vsi_list_exit;
1546 if (opc == ice_aqc_opc_alloc_res) {
1547 vsi_ele = &sw_buf->elem[0];
1548 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1551 ice_aq_alloc_free_vsi_list_exit:
1552 ice_free(hw, sw_buf);
1557 * ice_aq_set_storm_ctrl - Sets storm control configuration
1558 * @hw: pointer to the HW struct
1559 * @bcast_thresh: represents the upper threshold for broadcast storm control
1560 * @mcast_thresh: represents the upper threshold for multicast storm control
1561 * @ctl_bitmask: storm control control knobs
1563 * Sets the storm control configuration (0x0280)
1566 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1569 struct ice_aqc_storm_cfg *cmd;
1570 struct ice_aq_desc desc;
1572 cmd = &desc.params.storm_conf;
1574 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1576 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1577 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1578 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1580 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1584 * ice_aq_get_storm_ctrl - gets storm control configuration
1585 * @hw: pointer to the HW struct
1586 * @bcast_thresh: represents the upper threshold for broadcast storm control
1587 * @mcast_thresh: represents the upper threshold for multicast storm control
1588 * @ctl_bitmask: storm control control knobs
1590 * Gets the storm control configuration (0x0281)
1593 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1596 enum ice_status status;
1597 struct ice_aq_desc desc;
1599 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1601 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1603 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1606 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1609 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1612 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1619 * ice_aq_sw_rules - add/update/remove switch rules
1620 * @hw: pointer to the HW struct
1621 * @rule_list: pointer to switch rule population list
1622 * @rule_list_sz: total size of the rule list in bytes
1623 * @num_rules: number of switch rules in the rule_list
1624 * @opc: switch rules population command type - pass in the command opcode
1625 * @cd: pointer to command details structure or NULL
1627 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1629 static enum ice_status
1630 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1631 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1633 struct ice_aq_desc desc;
1635 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1637 if (opc != ice_aqc_opc_add_sw_rules &&
1638 opc != ice_aqc_opc_update_sw_rules &&
1639 opc != ice_aqc_opc_remove_sw_rules)
1640 return ICE_ERR_PARAM;
1642 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1644 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1645 desc.params.sw_rules.num_rules_fltr_entry_index =
1646 CPU_TO_LE16(num_rules);
1647 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1651 * ice_aq_add_recipe - add switch recipe
1652 * @hw: pointer to the HW struct
1653 * @s_recipe_list: pointer to switch rule population list
1654 * @num_recipes: number of switch recipes in the list
1655 * @cd: pointer to command details structure or NULL
1660 ice_aq_add_recipe(struct ice_hw *hw,
1661 struct ice_aqc_recipe_data_elem *s_recipe_list,
1662 u16 num_recipes, struct ice_sq_cd *cd)
1664 struct ice_aqc_add_get_recipe *cmd;
1665 struct ice_aq_desc desc;
1668 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1669 cmd = &desc.params.add_get_recipe;
1670 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1672 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1673 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1675 buf_size = num_recipes * sizeof(*s_recipe_list);
1677 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1681 * ice_aq_get_recipe - get switch recipe
1682 * @hw: pointer to the HW struct
1683 * @s_recipe_list: pointer to switch rule population list
1684 * @num_recipes: pointer to the number of recipes (input and output)
1685 * @recipe_root: root recipe number of recipe(s) to retrieve
1686 * @cd: pointer to command details structure or NULL
1690 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1691 * On output, *num_recipes will equal the number of entries returned in
1694 * The caller must supply enough space in s_recipe_list to hold all possible
1695 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1698 ice_aq_get_recipe(struct ice_hw *hw,
1699 struct ice_aqc_recipe_data_elem *s_recipe_list,
1700 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1702 struct ice_aqc_add_get_recipe *cmd;
1703 struct ice_aq_desc desc;
1704 enum ice_status status;
1707 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1708 return ICE_ERR_PARAM;
1710 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1711 cmd = &desc.params.add_get_recipe;
1712 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1714 cmd->return_index = CPU_TO_LE16(recipe_root);
1715 cmd->num_sub_recipes = 0;
1717 buf_size = *num_recipes * sizeof(*s_recipe_list);
1719 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1720 /* cppcheck-suppress constArgument */
1721 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1727 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1728 * @hw: pointer to the HW struct
1729 * @profile_id: package profile ID to associate the recipe with
1730 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1731 * @cd: pointer to command details structure or NULL
1732 * Recipe to profile association (0x0291)
1735 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1736 struct ice_sq_cd *cd)
1738 struct ice_aqc_recipe_to_profile *cmd;
1739 struct ice_aq_desc desc;
1741 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1742 cmd = &desc.params.recipe_to_profile;
1743 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1744 cmd->profile_id = CPU_TO_LE16(profile_id);
1745 /* Set the recipe ID bit in the bitmask to let the device know which
1746 * profile we are associating the recipe to
1748 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1749 ICE_NONDMA_TO_NONDMA);
1751 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1755 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1756 * @hw: pointer to the HW struct
1757 * @profile_id: package profile ID to associate the recipe with
1758 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1759 * @cd: pointer to command details structure or NULL
1760 * Associate profile ID with given recipe (0x0293)
1763 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1764 struct ice_sq_cd *cd)
1766 struct ice_aqc_recipe_to_profile *cmd;
1767 struct ice_aq_desc desc;
1768 enum ice_status status;
1770 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1771 cmd = &desc.params.recipe_to_profile;
1772 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1773 cmd->profile_id = CPU_TO_LE16(profile_id);
1775 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1777 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1778 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1784 * ice_alloc_recipe - add recipe resource
1785 * @hw: pointer to the hardware structure
1786 * @rid: recipe ID returned as response to AQ call
1788 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1790 struct ice_aqc_alloc_free_res_elem *sw_buf;
1791 enum ice_status status;
1794 buf_len = sizeof(*sw_buf);
1795 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1797 return ICE_ERR_NO_MEMORY;
1799 sw_buf->num_elems = CPU_TO_LE16(1);
1800 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1801 ICE_AQC_RES_TYPE_S) |
1802 ICE_AQC_RES_TYPE_FLAG_SHARED);
1803 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1804 ice_aqc_opc_alloc_res, NULL);
1806 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1807 ice_free(hw, sw_buf);
1812 /* ice_init_port_info - Initialize port_info with switch configuration data
1813 * @pi: pointer to port_info
1814 * @vsi_port_num: VSI number or port number
1815 * @type: Type of switch element (port or VSI)
1816 * @swid: switch ID of the switch the element is attached to
1817 * @pf_vf_num: PF or VF number
1818 * @is_vf: true if the element is a VF, false otherwise
1821 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1822 u16 swid, u16 pf_vf_num, bool is_vf)
1825 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1826 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1828 pi->pf_vf_num = pf_vf_num;
1830 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1831 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1834 ice_debug(pi->hw, ICE_DBG_SW,
1835 "incorrect VSI/port type received\n");
1840 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1841 * @hw: pointer to the hardware structure
1843 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1845 struct ice_aqc_get_sw_cfg_resp *rbuf;
1846 enum ice_status status;
1853 num_total_ports = 1;
1855 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1856 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1859 return ICE_ERR_NO_MEMORY;
1861 /* Multiple calls to ice_aq_get_sw_cfg may be required
1862 * to get all the switch configuration information. The need
1863 * for additional calls is indicated by ice_aq_get_sw_cfg
1864 * writing a non-zero value in req_desc
1867 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1868 &req_desc, &num_elems, NULL);
1873 for (i = 0; i < num_elems; i++) {
1874 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1875 u16 pf_vf_num, swid, vsi_port_num;
1879 ele = rbuf[i].elements;
1880 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1881 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1883 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1884 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1886 swid = LE16_TO_CPU(ele->swid);
1888 if (LE16_TO_CPU(ele->pf_vf_num) &
1889 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1892 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1893 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1896 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1897 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1898 if (j == num_total_ports) {
1899 ice_debug(hw, ICE_DBG_SW,
1900 "more ports than expected\n");
1901 status = ICE_ERR_CFG;
1904 ice_init_port_info(hw->port_info,
1905 vsi_port_num, res_type, swid,
1913 } while (req_desc && !status);
1916 ice_free(hw, (void *)rbuf);
1921 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1922 * @hw: pointer to the hardware structure
1923 * @fi: filter info structure to fill/update
1925 * This helper function populates the lb_en and lan_en elements of the provided
1926 * ice_fltr_info struct using the switch's type and characteristics of the
1927 * switch rule being configured.
1929 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1933 if ((fi->flag & ICE_FLTR_TX) &&
1934 (fi->fltr_act == ICE_FWD_TO_VSI ||
1935 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1936 fi->fltr_act == ICE_FWD_TO_Q ||
1937 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1938 /* Setting LB for prune actions will result in replicated
1939 * packets to the internal switch that will be dropped.
1941 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1944 /* Set lan_en to TRUE if
1945 * 1. The switch is a VEB AND
1947 * 2.1 The lookup is a directional lookup like ethertype,
1948 * promiscuous, ethertype-MAC, promiscuous-VLAN
1949 * and default-port OR
1950 * 2.2 The lookup is VLAN, OR
1951 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1952 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1956 * The switch is a VEPA.
1958 * In all other cases, the LAN enable has to be set to false.
1961 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1962 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1963 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1964 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1965 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1966 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1967 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1968 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1969 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1970 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1979 * ice_fill_sw_rule - Helper function to fill switch rule structure
1980 * @hw: pointer to the hardware structure
1981 * @f_info: entry containing packet forwarding information
1982 * @s_rule: switch rule structure to be filled in based on mac_entry
1983 * @opc: switch rules population command type - pass in the command opcode
1986 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1987 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1989 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1997 if (opc == ice_aqc_opc_remove_sw_rules) {
1998 s_rule->pdata.lkup_tx_rx.act = 0;
1999 s_rule->pdata.lkup_tx_rx.index =
2000 CPU_TO_LE16(f_info->fltr_rule_id);
2001 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2005 eth_hdr_sz = sizeof(dummy_eth_header);
2006 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2008 /* initialize the ether header with a dummy header */
2009 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2010 ice_fill_sw_info(hw, f_info);
2012 switch (f_info->fltr_act) {
2013 case ICE_FWD_TO_VSI:
2014 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2015 ICE_SINGLE_ACT_VSI_ID_M;
2016 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2017 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2018 ICE_SINGLE_ACT_VALID_BIT;
2020 case ICE_FWD_TO_VSI_LIST:
2021 act |= ICE_SINGLE_ACT_VSI_LIST;
2022 act |= (f_info->fwd_id.vsi_list_id <<
2023 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2024 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2025 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2026 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2027 ICE_SINGLE_ACT_VALID_BIT;
2030 act |= ICE_SINGLE_ACT_TO_Q;
2031 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2032 ICE_SINGLE_ACT_Q_INDEX_M;
2034 case ICE_DROP_PACKET:
2035 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2036 ICE_SINGLE_ACT_VALID_BIT;
2038 case ICE_FWD_TO_QGRP:
2039 q_rgn = f_info->qgrp_size > 0 ?
2040 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2041 act |= ICE_SINGLE_ACT_TO_Q;
2042 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2043 ICE_SINGLE_ACT_Q_INDEX_M;
2044 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2045 ICE_SINGLE_ACT_Q_REGION_M;
2052 act |= ICE_SINGLE_ACT_LB_ENABLE;
2054 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2056 switch (f_info->lkup_type) {
2057 case ICE_SW_LKUP_MAC:
2058 daddr = f_info->l_data.mac.mac_addr;
2060 case ICE_SW_LKUP_VLAN:
2061 vlan_id = f_info->l_data.vlan.vlan_id;
2062 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2063 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2064 act |= ICE_SINGLE_ACT_PRUNE;
2065 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2068 case ICE_SW_LKUP_ETHERTYPE_MAC:
2069 daddr = f_info->l_data.ethertype_mac.mac_addr;
2071 case ICE_SW_LKUP_ETHERTYPE:
2072 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2073 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2075 case ICE_SW_LKUP_MAC_VLAN:
2076 daddr = f_info->l_data.mac_vlan.mac_addr;
2077 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2079 case ICE_SW_LKUP_PROMISC_VLAN:
2080 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2082 case ICE_SW_LKUP_PROMISC:
2083 daddr = f_info->l_data.mac_vlan.mac_addr;
2089 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2090 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2091 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2093 /* Recipe set depending on lookup type */
2094 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2095 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2096 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2099 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2100 ICE_NONDMA_TO_NONDMA);
2102 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2103 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2104 *off = CPU_TO_BE16(vlan_id);
2107 /* Create the switch rule with the final dummy Ethernet header */
2108 if (opc != ice_aqc_opc_update_sw_rules)
2109 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2113 * ice_add_marker_act
2114 * @hw: pointer to the hardware structure
2115 * @m_ent: the management entry for which sw marker needs to be added
2116 * @sw_marker: sw marker to tag the Rx descriptor with
2117 * @l_id: large action resource ID
2119 * Create a large action to hold software marker and update the switch rule
2120 * entry pointed by m_ent with newly created large action
2122 static enum ice_status
2123 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2124 u16 sw_marker, u16 l_id)
2126 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2127 /* For software marker we need 3 large actions
2128 * 1. FWD action: FWD TO VSI or VSI LIST
2129 * 2. GENERIC VALUE action to hold the profile ID
2130 * 3. GENERIC VALUE action to hold the software marker ID
2132 const u16 num_lg_acts = 3;
2133 enum ice_status status;
2139 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2140 return ICE_ERR_PARAM;
2142 /* Create two back-to-back switch rules and submit them to the HW using
2143 * one memory buffer:
2147 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2148 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2149 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2151 return ICE_ERR_NO_MEMORY;
2153 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2155 /* Fill in the first switch rule i.e. large action */
2156 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2157 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2158 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2160 /* First action VSI forwarding or VSI list forwarding depending on how
2163 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2164 m_ent->fltr_info.fwd_id.hw_vsi_id;
2166 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2167 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2168 ICE_LG_ACT_VSI_LIST_ID_M;
2169 if (m_ent->vsi_count > 1)
2170 act |= ICE_LG_ACT_VSI_LIST;
2171 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2173 /* Second action descriptor type */
2174 act = ICE_LG_ACT_GENERIC;
2176 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2177 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2179 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2180 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2182 /* Third action Marker value */
2183 act |= ICE_LG_ACT_GENERIC;
2184 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2185 ICE_LG_ACT_GENERIC_VALUE_M;
2187 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2189 /* call the fill switch rule to fill the lookup Tx Rx structure */
2190 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2191 ice_aqc_opc_update_sw_rules);
2193 /* Update the action to point to the large action ID */
2194 rx_tx->pdata.lkup_tx_rx.act =
2195 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2196 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2197 ICE_SINGLE_ACT_PTR_VAL_M));
2199 /* Use the filter rule ID of the previously created rule with single
2200 * act. Once the update happens, hardware will treat this as large
2203 rx_tx->pdata.lkup_tx_rx.index =
2204 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2206 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2207 ice_aqc_opc_update_sw_rules, NULL);
2209 m_ent->lg_act_idx = l_id;
2210 m_ent->sw_marker_id = sw_marker;
2213 ice_free(hw, lg_act);
/* NOTE(review): this extract elides several original lines (opening braces,
 * local declarations such as rules_size/lg_act_size/act/id/f_rule_id, and the
 * NULL check after ice_malloc); comments below cover only visible statements.
 */
2218 * ice_add_counter_act - add/update filter rule with counter action
2219 * @hw: pointer to the hardware structure
2220 * @m_ent: the management entry for which counter needs to be added
2221 * @counter_id: VLAN counter ID returned as part of allocate resource
2222 * @l_id: large action resource ID
2224 static enum ice_status
2225 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2226 u16 counter_id, u16 l_id)
2228 struct ice_aqc_sw_rules_elem *lg_act;
2229 struct ice_aqc_sw_rules_elem *rx_tx;
2230 enum ice_status status;
2231 /* 2 actions will be added while adding a large action counter */
2232 const int num_acts = 2;
/* Counter actions are only attached to MAC lookup rules here. */
2239 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2240 return ICE_ERR_PARAM;
2242 /* Create two back-to-back switch rules and submit them to the HW using
2243 * one memory buffer:
2247 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2248 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2249 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2252 return ICE_ERR_NO_MEMORY;
/* The Rx/Tx lookup rule lives immediately after the large action
 * inside the same allocation.
 */
2254 rx_tx = (struct ice_aqc_sw_rules_elem *)
2255 ((u8 *)lg_act + lg_act_size);
2257 /* Fill in the first switch rule i.e. large action */
2258 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2259 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2260 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2262 /* First action VSI forwarding or VSI list forwarding depending on how
/* Use the VSI list ID when multiple VSIs share this filter, otherwise
 * the single HW VSI ID.
 */
2265 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2266 m_ent->fltr_info.fwd_id.hw_vsi_id;
2268 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2269 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2270 ICE_LG_ACT_VSI_LIST_ID_M;
2271 if (m_ent->vsi_count > 1)
2272 act |= ICE_LG_ACT_VSI_LIST;
2273 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2275 /* Second action counter ID */
2276 act = ICE_LG_ACT_STAT_COUNT;
2277 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2278 ICE_LG_ACT_STAT_COUNT_M;
2279 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2281 /* call the fill switch rule to fill the lookup Tx Rx structure */
2282 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2283 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule's single action at the large action entry. */
2285 act = ICE_SINGLE_ACT_PTR;
2286 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2287 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2289 /* Use the filter rule ID of the previously created rule with single
2290 * act. Once the update happens, hardware will treat this as large
2293 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2294 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules (large action + lookup update) in one AQ call. */
2296 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2297 ice_aqc_opc_update_sw_rules, NULL);
2299 m_ent->lg_act_idx = l_id;
2300 m_ent->counter_index = counter_id;
2303 ice_free(hw, lg_act);
2308 * ice_create_vsi_list_map
2309 * @hw: pointer to the hardware structure
2310 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2311 * @num_vsi: number of VSI handles in the array
2312 * @vsi_list_id: VSI list ID generated as part of allocate resource
2314 * Helper function to create a new entry of VSI list ID to VSI mapping
2315 * using the given VSI list ID
2317 static struct ice_vsi_list_map_info *
2318 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2321 struct ice_switch_info *sw = hw->switch_info;
2322 struct ice_vsi_list_map_info *v_map;
/* ice_calloc zero-initializes the map (vsi_map bitmap starts empty).
 * NOTE(review): the NULL check after allocation is elided in this extract.
 */
2325 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2330 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap. */
2332 for (i = 0; i < num_vsi; i++)
2333 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new mapping on the switch-wide list. */
2335 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2340 * ice_update_vsi_list_rule
2341 * @hw: pointer to the hardware structure
2342 * @vsi_handle_arr: array of VSI handles to form a VSI list
2343 * @num_vsi: number of VSI handles in the array
2344 * @vsi_list_id: VSI list ID generated as part of allocate resource
2345 * @remove: Boolean value to indicate if this is a remove action
2346 * @opc: switch rules population command type - pass in the command opcode
2347 * @lkup_type: lookup type of the filter
2349 * Call AQ command to add a new switch rule or update existing switch rule
2350 * using the given VSI list ID
2352 static enum ice_status
2353 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2354 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2355 enum ice_sw_lkup_type lkup_type)
2357 struct ice_aqc_sw_rules_elem *s_rule;
2358 enum ice_status status;
2364 return ICE_ERR_PARAM;
/* Pick the rule type: most lookup types use a VSI list set/clear,
 * while VLAN lookups use a prune list set/clear instead.
 */
2366 if (lkup_type == ICE_SW_LKUP_MAC ||
2367 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2368 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2369 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2370 lkup_type == ICE_SW_LKUP_PROMISC ||
2371 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2372 lkup_type == ICE_SW_LKUP_LAST)
2373 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2374 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2375 else if (lkup_type == ICE_SW_LKUP_VLAN)
2376 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2377 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2379 return ICE_ERR_PARAM;
/* Rule buffer is sized for num_vsi entries in the VSI array. */
2381 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2382 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2384 return ICE_ERR_NO_MEMORY;
/* Validate each handle and translate it to the HW VSI number the
 * admin queue expects.
 */
2385 for (i = 0; i < num_vsi; i++) {
2386 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2387 status = ICE_ERR_PARAM;
2390 /* AQ call requires hw_vsi_id(s) */
2391 s_rule->pdata.vsi_list.vsi[i] =
2392 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2395 s_rule->type = CPU_TO_LE16(rule_type);
2396 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2397 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2399 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2402 ice_free(hw, s_rule);
2407 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2408 * @hw: pointer to the HW struct
2409 * @vsi_handle_arr: array of VSI handles to form a VSI list
2410 * @num_vsi: number of VSI handles in the array
2411 * @vsi_list_id: stores the ID of the VSI list to be created
2412 * @lkup_type: switch rule filter's lookup type
2414 static enum ice_status
2415 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2416 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2418 enum ice_status status;
/* First allocate a VSI list resource; the new list ID is returned
 * through *vsi_list_id.
 */
2420 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2421 ice_aqc_opc_alloc_res);
2425 /* Update the newly created VSI list to include the specified VSIs */
2426 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2427 *vsi_list_id, false,
2428 ice_aqc_opc_add_sw_rules, lkup_type);
2432 * ice_create_pkt_fwd_rule
2433 * @hw: pointer to the hardware structure
2434 * @recp_list: corresponding filter management list
2435 * @f_entry: entry containing packet forwarding information
2437 * Create switch rule with given filter information and add an entry
2438 * to the corresponding filter management list to track this switch rule
2441 static enum ice_status
2442 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2443 struct ice_fltr_list_entry *f_entry)
2445 struct ice_fltr_mgmt_list_entry *fm_entry;
2446 struct ice_aqc_sw_rules_elem *s_rule;
2447 enum ice_status status;
2449 s_rule = (struct ice_aqc_sw_rules_elem *)
2450 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2452 return ICE_ERR_NO_MEMORY;
2453 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2454 ice_malloc(hw, sizeof(*fm_entry));
2456 status = ICE_ERR_NO_MEMORY;
2457 goto ice_create_pkt_fwd_rule_exit;
2460 fm_entry->fltr_info = f_entry->fltr_info;
2462 /* Initialize all the fields for the management entry */
2463 fm_entry->vsi_count = 1;
2464 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2465 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2466 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2468 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2469 ice_aqc_opc_add_sw_rules);
2471 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2472 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is freed; only the s_rule buffer
 * is released at the common exit label.
 */
2474 ice_free(hw, fm_entry);
2475 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule ID in the response buffer;
 * record it in both the caller's entry and the tracking entry.
 */
2478 f_entry->fltr_info.fltr_rule_id =
2479 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2480 fm_entry->fltr_info.fltr_rule_id =
2481 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2483 /* The book keeping entries will get removed when base driver
2484 * calls remove filter AQ command
2486 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2488 ice_create_pkt_fwd_rule_exit:
2489 ice_free(hw, s_rule);
2494 * ice_update_pkt_fwd_rule
2495 * @hw: pointer to the hardware structure
2496 * @f_info: filter information for switch rule
2498 * Call AQ command to update a previously created switch rule with a
2501 static enum ice_status
2502 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2504 struct ice_aqc_sw_rules_elem *s_rule;
2505 enum ice_status status;
2507 s_rule = (struct ice_aqc_sw_rules_elem *)
2508 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE)
2510 return ICE_ERR_NO_MEMORY;
2512 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its previously returned rule ID. */
2514 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2516 /* Update switch rule with new rule set to forward VSI list */
2517 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2518 ice_aqc_opc_update_sw_rules, NULL);
2520 ice_free(hw, s_rule);
2525 * ice_update_sw_rule_bridge_mode
2526 * @hw: pointer to the HW struct
2528 * Updates unicast switch filter rules based on VEB/VEPA mode
2530 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2532 struct ice_switch_info *sw = hw->switch_info;
2533 struct ice_fltr_mgmt_list_entry *fm_entry;
2534 enum ice_status status = ICE_SUCCESS;
2535 struct LIST_HEAD_TYPE *rule_head;
2536 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC lookup recipe's rules are walked here. */
2538 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2539 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2541 ice_acquire_lock(rule_lock);
2542 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2544 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2545 u8 *addr = fi->l_data.mac.mac_addr;
2547 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action are
 * re-programmed.
 */
2550 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2551 (fi->fltr_act == ICE_FWD_TO_VSI ||
2552 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2553 fi->fltr_act == ICE_FWD_TO_Q ||
2554 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2555 status = ice_update_pkt_fwd_rule(hw, fi);
2561 ice_release_lock(rule_lock);
/* NOTE(review): several original lines (braces, some else/goto lines) are
 * elided in this extract; comments describe only the visible statements.
 */
2567 * ice_add_update_vsi_list
2568 * @hw: pointer to the hardware structure
2569 * @m_entry: pointer to current filter management list entry
2570 * @cur_fltr: filter information from the book keeping entry
2571 * @new_fltr: filter information with the new VSI to be added
2573 * Call AQ command to add or update previously created VSI list with new VSI.
2575 * Helper function to do book keeping associated with adding filter information
2576 * The algorithm to do the book keeping is described below :
2577 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2578 * if only one VSI has been added till now
2579 * Allocate a new VSI list and add two VSIs
2580 * to this list using switch rule command
2581 * Update the previously created switch rule with the
2582 * newly created VSI list ID
2583 * if a VSI list was previously created
2584 * Add the new VSI to the previously created VSI list set
2585 * using the update switch rule command
2587 static enum ice_status
2588 ice_add_update_vsi_list(struct ice_hw *hw,
2589 struct ice_fltr_mgmt_list_entry *m_entry,
2590 struct ice_fltr_info *cur_fltr,
2591 struct ice_fltr_info *new_fltr)
2593 enum ice_status status = ICE_SUCCESS;
2594 u16 vsi_list_id = 0;
/* Queue/queue-group forwarding rules cannot be converted to VSI
 * lists, so these combinations are rejected up front.
 */
2596 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2597 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2598 return ICE_ERR_NOT_IMPL;
2600 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2601 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2602 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2603 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2604 return ICE_ERR_NOT_IMPL;
2606 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2607 /* Only one entry existed in the mapping and it was not already
2608 * a part of a VSI list. So, create a VSI list with the old and
2611 struct ice_fltr_info tmp_fltr;
2612 u16 vsi_handle_arr[2];
2614 /* A rule already exists with the new VSI being added */
2615 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2616 return ICE_ERR_ALREADY_EXISTS;
2618 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2619 vsi_handle_arr[1] = new_fltr->vsi_handle;
2620 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2622 new_fltr->lkup_type);
2626 tmp_fltr = *new_fltr;
2627 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2628 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2629 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2630 /* Update the previous switch rule of "MAC forward to VSI" to
2631 * "MAC fwd to VSI list"
2633 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the tracked filter now forwards to the list. */
2637 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2638 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2639 m_entry->vsi_list_info =
2640 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2643 /* If this entry was large action then the large action needs
2644 * to be updated to point to FWD to VSI list
2646 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2648 ice_add_marker_act(hw, m_entry,
2649 m_entry->sw_marker_id,
2650 m_entry->lg_act_idx);
/* Otherwise a VSI list already exists: append the new VSI to it. */
2652 u16 vsi_handle = new_fltr->vsi_handle;
2653 enum ice_adminq_opc opcode;
2655 if (!m_entry->vsi_list_info)
2658 /* A rule already exists with the new VSI being added */
2659 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2662 /* Update the previously created VSI list set with
2663 * the new VSI ID passed in
2665 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2666 opcode = ice_aqc_opc_update_sw_rules;
2668 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2669 vsi_list_id, false, opcode,
2670 new_fltr->lkup_type);
2671 /* update VSI list mapping info with new VSI ID */
2673 ice_set_bit(vsi_handle,
2674 m_entry->vsi_list_info->vsi_map);
2677 m_entry->vsi_count++;
2682 * ice_find_rule_entry - Search a rule entry
2683 * @list_head: head of rule list
2684 * @f_info: rule information
2686 * Helper function to search for a given rule entry
2687 * Returns pointer to entry storing the rule if found
2689 static struct ice_fltr_mgmt_list_entry *
2690 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2691 struct ice_fltr_info *f_info)
2693 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2695 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the raw lookup data plus the direction/flag field; other
 * ice_fltr_info fields (action, fwd_id) are not compared.
 */
2697 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2698 sizeof(f_info->l_data)) &&
2699 f_info->flag == list_itr->fltr_info.flag) {
2708 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2709 * @recp_list: VSI lists needs to be searched
2710 * @vsi_handle: VSI handle to be found in VSI list
2711 * @vsi_list_id: VSI list ID found containing vsi_handle
2713 * Helper function to search a VSI list with single entry containing given VSI
2714 * handle element. This can be extended further to search VSI list with more
2715 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2717 static struct ice_vsi_list_map_info *
2718 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2721 struct ice_vsi_list_map_info *map_info = NULL;
2722 struct LIST_HEAD_TYPE *list_head;
2724 list_head = &recp_list->filt_rules;
/* Advanced-rule recipes keep a different entry type on filt_rules,
 * so the walk is duplicated per entry type.
 */
2725 if (recp_list->adv_rule) {
2726 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2728 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2729 ice_adv_fltr_mgmt_list_entry,
2731 if (list_itr->vsi_list_info) {
2732 map_info = list_itr->vsi_list_info;
2733 if (ice_is_bit_set(map_info->vsi_map,
2735 *vsi_list_id = map_info->vsi_list_id;
2741 struct ice_fltr_mgmt_list_entry *list_itr;
2743 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2744 ice_fltr_mgmt_list_entry,
/* For legacy rules only single-VSI lists are considered, per the
 * function header above.
 */
2746 if (list_itr->vsi_count == 1 &&
2747 list_itr->vsi_list_info) {
2748 map_info = list_itr->vsi_list_info;
2749 if (ice_is_bit_set(map_info->vsi_map,
2751 *vsi_list_id = map_info->vsi_list_id;
2761 * ice_add_rule_internal - add rule for a given lookup type
2762 * @hw: pointer to the hardware structure
2763 * @recp_list: recipe list for which rule has to be added
2764 * @lport: logic port number on which function add rule
2765 * @f_entry: structure containing MAC forwarding information
2767 * Adds or updates the rule lists for a given recipe
2769 static enum ice_status
2770 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2771 u8 lport, struct ice_fltr_list_entry *f_entry)
2773 struct ice_fltr_info *new_fltr, *cur_fltr;
2774 struct ice_fltr_mgmt_list_entry *m_entry;
2775 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2776 enum ice_status status = ICE_SUCCESS;
2778 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2779 return ICE_ERR_PARAM;
2781 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2782 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2783 f_entry->fltr_info.fwd_id.hw_vsi_id =
2784 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2786 rule_lock = &recp_list->filt_rule_lock;
2788 ice_acquire_lock(rule_lock);
2789 new_fltr = &f_entry->fltr_info;
/* The source field is the logical port for Rx rules, and the HW VSI
 * number for Tx rules.
 */
2790 if (new_fltr->flag & ICE_FLTR_RX)
2791 new_fltr->src = lport;
2792 else if (new_fltr->flag & ICE_FLTR_TX)
2794 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* If no matching rule exists yet, create a fresh forwarding rule;
 * otherwise fold the new VSI into the existing rule's VSI list.
 */
2796 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2798 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2799 goto exit_add_rule_internal;
2802 cur_fltr = &m_entry->fltr_info;
2803 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2805 exit_add_rule_internal:
2806 ice_release_lock(rule_lock);
2811 * ice_remove_vsi_list_rule
2812 * @hw: pointer to the hardware structure
2813 * @vsi_list_id: VSI list ID generated as part of allocate resource
2814 * @lkup_type: switch rule filter lookup type
2816 * The VSI list should be emptied before this function is called to remove the
2819 static enum ice_status
2820 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2821 enum ice_sw_lkup_type lkup_type)
2823 struct ice_aqc_sw_rules_elem *s_rule;
2824 enum ice_status status;
/* Zero-VSI rule size: only the list header is needed for a clear. */
2827 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2828 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2830 return ICE_ERR_NO_MEMORY;
2832 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2833 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2835 /* Free the vsi_list resource that we allocated. It is assumed that the
2836 * list is empty at this point.
2838 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2839 ice_aqc_opc_free_res);
2841 ice_free(hw, s_rule);
/* NOTE(review): some original lines (braces, early-return status checks,
 * a "remove = true" style line) are elided in this extract.
 */
2846 * ice_rem_update_vsi_list
2847 * @hw: pointer to the hardware structure
2848 * @vsi_handle: VSI handle of the VSI to remove
2849 * @fm_list: filter management entry for which the VSI list management needs to
2852 static enum ice_status
2853 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2854 struct ice_fltr_mgmt_list_entry *fm_list)
2856 enum ice_sw_lkup_type lkup_type;
2857 enum ice_status status = ICE_SUCCESS;
2860 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2861 fm_list->vsi_count == 0)
2862 return ICE_ERR_PARAM;
2864 /* A rule with the VSI being removed does not exist */
2865 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2866 return ICE_ERR_DOES_NOT_EXIST;
2868 lkup_type = fm_list->fltr_info.lkup_type;
2869 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove = true). */
2870 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2871 ice_aqc_opc_update_sw_rules,
2876 fm_list->vsi_count--;
2877 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* When exactly one VSI remains (non-VLAN lookups), collapse the rule
 * back to direct VSI forwarding for the remaining VSI.
 */
2879 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2880 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2881 struct ice_vsi_list_map_info *vsi_list_info =
2882 fm_list->vsi_list_info;
2885 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2887 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2888 return ICE_ERR_OUT_OF_RANGE;
2890 /* Make sure VSI list is empty before removing it below */
2891 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2893 ice_aqc_opc_update_sw_rules,
2898 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2899 tmp_fltr_info.fwd_id.hw_vsi_id =
2900 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2901 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2902 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2904 ice_debug(hw, ICE_DBG_SW,
2905 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2906 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2910 fm_list->fltr_info = tmp_fltr_info;
/* Tear down the now-unused VSI list (one VSI left for non-VLAN, or
 * empty for VLAN prune lists) and drop the software map entry.
 */
2913 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2914 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2915 struct ice_vsi_list_map_info *vsi_list_info =
2916 fm_list->vsi_list_info;
2918 /* Remove the VSI list since it is no longer used */
2919 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2921 ice_debug(hw, ICE_DBG_SW,
2922 "Failed to remove VSI list %d, error %d\n",
2923 vsi_list_id, status);
2927 LIST_DEL(&vsi_list_info->list_entry);
2928 ice_free(hw, vsi_list_info);
2929 fm_list->vsi_list_info = NULL;
/* NOTE(review): braces, some "remove_rule = true" style lines and goto
 * targets are elided in this extract; comments cover visible statements.
 */
2936 * ice_remove_rule_internal - Remove a filter rule of a given type
2938 * @hw: pointer to the hardware structure
2939 * @recp_list: recipe list for which the rule needs to removed
2940 * @f_entry: rule entry containing filter information
2942 static enum ice_status
2943 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2944 struct ice_fltr_list_entry *f_entry)
2946 struct ice_fltr_mgmt_list_entry *list_elem;
2947 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2948 enum ice_status status = ICE_SUCCESS;
2949 bool remove_rule = false;
2952 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2953 return ICE_ERR_PARAM;
2954 f_entry->fltr_info.fwd_id.hw_vsi_id =
2955 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2957 rule_lock = &recp_list->filt_rule_lock;
2958 ice_acquire_lock(rule_lock);
2959 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2960 &f_entry->fltr_info);
2962 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the whole HW rule must be removed, or only the VSI
 * list behind it adjusted.
 */
2966 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2968 } else if (!list_elem->vsi_list_info) {
2969 status = ICE_ERR_DOES_NOT_EXIST;
2971 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2972 /* a ref_cnt > 1 indicates that the vsi_list is being
2973 * shared by multiple rules. Decrement the ref_cnt and
2974 * remove this rule, but do not modify the list, as it
2975 * is in-use by other rules.
2977 list_elem->vsi_list_info->ref_cnt--;
2980 /* a ref_cnt of 1 indicates the vsi_list is only used
2981 * by one rule. However, the original removal request is only
2982 * for a single VSI. Update the vsi_list first, and only
2983 * remove the rule if there are no further VSIs in this list.
2985 vsi_handle = f_entry->fltr_info.vsi_handle;
2986 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2989 /* if VSI count goes to zero after updating the VSI list */
2990 if (list_elem->vsi_count == 0)
2995 /* Remove the lookup rule */
2996 struct ice_aqc_sw_rules_elem *s_rule;
/* NO_HDR size: removal does not need the dummy packet header. */
2998 s_rule = (struct ice_aqc_sw_rules_elem *)
2999 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3001 status = ICE_ERR_NO_MEMORY;
3005 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3006 ice_aqc_opc_remove_sw_rules);
3008 status = ice_aq_sw_rules(hw, s_rule,
3009 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3010 ice_aqc_opc_remove_sw_rules, NULL);
3012 /* Remove a book keeping from the list */
3013 ice_free(hw, s_rule);
3018 LIST_DEL(&list_elem->list_entry);
3019 ice_free(hw, list_elem);
3022 ice_release_lock(rule_lock);
3027 * ice_aq_get_res_alloc - get allocated resources
3028 * @hw: pointer to the HW struct
3029 * @num_entries: pointer to u16 to store the number of resource entries returned
3030 * @buf: pointer to user-supplied buffer
3031 * @buf_size: size of buff
3032 * @cd: pointer to command details structure or NULL
3034 * The user-supplied buffer must be large enough to store the resource
3035 * information for all resource types. Each resource type is an
3036 * ice_aqc_get_res_resp_data_elem structure.
3039 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3040 u16 buf_size, struct ice_sq_cd *cd)
3042 struct ice_aqc_get_res_alloc *resp;
3043 enum ice_status status;
3044 struct ice_aq_desc desc;
3047 return ICE_ERR_BAD_PTR;
3049 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3050 return ICE_ERR_INVAL_SIZE;
3052 resp = &desc.params.get_res;
3054 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3055 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; fill it only on success when provided. */
3057 if (!status && num_entries)
3058 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3064 * ice_aq_get_res_descs - get allocated resource descriptors
3065 * @hw: pointer to the hardware structure
3066 * @num_entries: number of resource entries in buffer
3067 * @buf: Indirect buffer to hold data parameters and response
3068 * @buf_size: size of buffer for indirect commands
3069 * @res_type: resource type
3070 * @res_shared: is resource shared
3071 * @desc_id: input - first desc ID to start; output - next desc ID
3072 * @cd: pointer to command details structure or NULL
3075 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3076 struct ice_aqc_get_allocd_res_desc_resp *buf,
3077 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3078 struct ice_sq_cd *cd)
3080 struct ice_aqc_get_allocd_res_desc *cmd;
3081 struct ice_aq_desc desc;
3082 enum ice_status status;
3084 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3086 cmd = &desc.params.get_res_desc;
3089 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements. */
3091 if (buf_size != (num_entries * sizeof(*buf)))
3092 return ICE_ERR_PARAM;
3094 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and the shared flag into one field. */
3096 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3097 ICE_AQC_RES_TYPE_M) | (res_shared ?
3098 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3099 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3101 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* Return the continuation cursor for the next call. */
3103 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
/* NOTE(review): this extract elides braces, the num_unicast++ counting line,
 * and several status checks; comments describe only visible statements.
 */
3109 * ice_add_mac_rule - Add a MAC address based filter rule
3110 * @hw: pointer to the hardware structure
3111 * @m_list: list of MAC addresses and forwarding information
3112 * @sw: pointer to switch info struct for which function add rule
3113 * @lport: logic port number on which function add rule
3115 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3116 * multiple unicast addresses, the function assumes that all the
3117 * addresses are unique in a given add_mac call. It doesn't
3118 * check for duplicates in this case, removing duplicates from a given
3119 * list should be taken care of in the caller of this function.
3121 static enum ice_status
3122 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3123 struct ice_switch_info *sw, u8 lport)
3125 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3126 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3127 struct ice_fltr_list_entry *m_list_itr;
3128 struct LIST_HEAD_TYPE *rule_head;
3129 u16 total_elem_left, s_rule_size;
3130 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3131 enum ice_status status = ICE_SUCCESS;
3132 u16 num_unicast = 0;
3136 rule_lock = &recp_list->filt_rule_lock;
3137 rule_head = &recp_list->filt_rules;
/* First pass: validate every entry and immediately add multicast (or
 * shared-unicast) rules; pure unicast entries are counted for a bulk
 * AQ add below.
 */
3139 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3141 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3145 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3146 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3147 if (!ice_is_vsi_valid(hw, vsi_handle))
3148 return ICE_ERR_PARAM;
3149 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3150 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3151 /* update the src in case it is VSI num */
3152 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3153 return ICE_ERR_PARAM;
3154 m_list_itr->fltr_info.src = hw_vsi_id;
3155 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3156 IS_ZERO_ETHER_ADDR(add))
3157 return ICE_ERR_PARAM;
3158 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3159 /* Don't overwrite the unicast address */
3160 ice_acquire_lock(rule_lock);
3161 if (ice_find_rule_entry(rule_head,
3162 &m_list_itr->fltr_info)) {
3163 ice_release_lock(rule_lock);
3164 return ICE_ERR_ALREADY_EXISTS;
3166 ice_release_lock(rule_lock);
3168 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3169 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3170 m_list_itr->status =
3171 ice_add_rule_internal(hw, recp_list, lport,
3173 if (m_list_itr->status)
3174 return m_list_itr->status;
3178 ice_acquire_lock(rule_lock);
3179 /* Exit if no suitable entries were found for adding bulk switch rule */
3181 status = ICE_SUCCESS;
3182 goto ice_add_mac_exit;
3185 /* Allocate switch rule buffer for the bulk update for unicast */
3186 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3187 s_rule = (struct ice_aqc_sw_rules_elem *)
3188 ice_calloc(hw, num_unicast, s_rule_size);
3190 status = ICE_ERR_NO_MEMORY;
3191 goto ice_add_mac_exit;
/* Second pass: fill one switch rule per unicast address into the
 * contiguous bulk buffer.
 */
3195 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3197 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3198 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3200 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3201 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3202 ice_aqc_opc_add_sw_rules);
3203 r_iter = (struct ice_aqc_sw_rules_elem *)
3204 ((u8 *)r_iter + s_rule_size);
3208 /* Call AQ bulk switch rule update for all unicast addresses */
3210 /* Call AQ switch rule in AQ_MAX chunk */
/* Submit the bulk buffer in chunks bounded by ICE_AQ_MAX_BUF_LEN. */
3211 for (total_elem_left = num_unicast; total_elem_left > 0;
3212 total_elem_left -= elem_sent) {
3213 struct ice_aqc_sw_rules_elem *entry = r_iter;
3215 elem_sent = MIN_T(u8, total_elem_left,
3216 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3217 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3218 elem_sent, ice_aqc_opc_add_sw_rules,
3221 goto ice_add_mac_exit;
3222 r_iter = (struct ice_aqc_sw_rules_elem *)
3223 ((u8 *)r_iter + (elem_sent * s_rule_size));
3226 /* Fill up rule ID based on the value returned from FW */
/* Third pass: harvest FW-assigned rule IDs and create one tracking
 * entry per unicast address.
 */
3228 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3230 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3231 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3232 struct ice_fltr_mgmt_list_entry *fm_entry;
3234 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3235 f_info->fltr_rule_id =
3236 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3237 f_info->fltr_act = ICE_FWD_TO_VSI;
3238 /* Create an entry to track this MAC address */
3239 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3240 ice_malloc(hw, sizeof(*fm_entry));
3242 status = ICE_ERR_NO_MEMORY;
3243 goto ice_add_mac_exit;
3245 fm_entry->fltr_info = *f_info;
3246 fm_entry->vsi_count = 1;
3247 /* The book keeping entries will get removed when
3248 * base driver calls remove filter AQ command
3251 LIST_ADD(&fm_entry->list_entry, rule_head);
3252 r_iter = (struct ice_aqc_sw_rules_elem *)
3253 ((u8 *)r_iter + s_rule_size);
3258 ice_release_lock(rule_lock);
3260 ice_free(hw, s_rule);
3265 * ice_add_mac - Add a MAC address based filter rule
3266 * @hw: pointer to the hardware structure
3267 * @m_list: list of MAC addresses and forwarding information
3269 * Function add MAC rule for logical port from HW struct
3272 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3275 return ICE_ERR_PARAM;
/* Thin public wrapper: delegates to ice_add_mac_rule() with this HW's
 * switch info and the port's logical port number.
 */
3277 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3278 hw->port_info->lport);
3282 * ice_add_vlan_internal - Add one VLAN based filter rule
3283 * @hw: pointer to the hardware structure
3284 * @recp_list: recipe list for which rule has to be added
3285 * @f_entry: filter entry containing one VLAN information
3287 static enum ice_status
3288 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3289 struct ice_fltr_list_entry *f_entry)
3291 struct ice_fltr_mgmt_list_entry *v_list_itr;
3292 struct ice_fltr_info *new_fltr, *cur_fltr;
3293 enum ice_sw_lkup_type lkup_type;
3294 u16 vsi_list_id = 0, vsi_handle;
3295 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3296 enum ice_status status = ICE_SUCCESS;
3298 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3299 return ICE_ERR_PARAM;
3301 f_entry->fltr_info.fwd_id.hw_vsi_id =
3302 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3303 new_fltr = &f_entry->fltr_info;
3305 /* VLAN ID should only be 12 bits */
3306 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3307 return ICE_ERR_PARAM;
3309 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3310 return ICE_ERR_PARAM;
3312 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3313 lkup_type = new_fltr->lkup_type;
3314 vsi_handle = new_fltr->vsi_handle;
3315 rule_lock = &recp_list->filt_rule_lock;
3316 ice_acquire_lock(rule_lock);
3317 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3319 struct ice_vsi_list_map_info *map_info = NULL;
3321 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3322 /* All VLAN pruning rules use a VSI list. Check if
3323 * there is already a VSI list containing VSI that we
3324 * want to add. If found, use the same vsi_list_id for
3325 * this new VLAN rule or else create a new list.
3327 map_info = ice_find_vsi_list_entry(recp_list,
3331 status = ice_create_vsi_list_rule(hw,
3339 /* Convert the action to forwarding to a VSI list. */
3340 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3341 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3344 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3346 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3349 status = ICE_ERR_DOES_NOT_EXIST;
3352 /* reuse VSI list for new rule and increment ref_cnt */
3354 v_list_itr->vsi_list_info = map_info;
3355 map_info->ref_cnt++;
3357 v_list_itr->vsi_list_info =
3358 ice_create_vsi_list_map(hw, &vsi_handle,
3362 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3363 /* Update existing VSI list to add new VSI ID only if it used
3366 cur_fltr = &v_list_itr->fltr_info;
3367 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3370 /* If VLAN rule exists and VSI list being used by this rule is
3371 * referenced by more than 1 VLAN rule. Then create a new VSI
3372 * list appending previous VSI with new VSI and update existing
3373 * VLAN rule to point to new VSI list ID
3375 struct ice_fltr_info tmp_fltr;
3376 u16 vsi_handle_arr[2];
3379 /* Current implementation only supports reusing VSI list with
3380 * one VSI count. We should never hit below condition
3382 if (v_list_itr->vsi_count > 1 &&
3383 v_list_itr->vsi_list_info->ref_cnt > 1) {
3384 ice_debug(hw, ICE_DBG_SW,
3385 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3386 status = ICE_ERR_CFG;
3391 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3394 /* A rule already exists with the new VSI being added */
3395 if (cur_handle == vsi_handle) {
3396 status = ICE_ERR_ALREADY_EXISTS;
3400 vsi_handle_arr[0] = cur_handle;
3401 vsi_handle_arr[1] = vsi_handle;
3402 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3403 &vsi_list_id, lkup_type);
3407 tmp_fltr = v_list_itr->fltr_info;
3408 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3409 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3410 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3411 /* Update the previous switch rule to a new VSI list which
3412 * includes current VSI that is requested
3414 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3418 /* before overriding VSI list map info. decrement ref_cnt of
3421 v_list_itr->vsi_list_info->ref_cnt--;
3423 /* now update to newly created list */
3424 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3425 v_list_itr->vsi_list_info =
3426 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3428 v_list_itr->vsi_count++;
3432 ice_release_lock(rule_lock);
3437 * ice_add_vlan_rule - Add VLAN based filter rule
3438 * @hw: pointer to the hardware structure
3439 * @v_list: list of VLAN entries and forwarding information
3440 * @sw: pointer to switch info struct for which function add rule
3442 static enum ice_status
3443 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3444 struct ice_switch_info *sw)
3446 struct ice_fltr_list_entry *v_list_itr;
3447 struct ice_sw_recipe *recp_list;
3449 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3450 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3452 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3453 return ICE_ERR_PARAM;
3454 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3455 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3457 if (v_list_itr->status)
3458 return v_list_itr->status;
3464 * ice_add_vlan - Add a VLAN based filter rule
3465 * @hw: pointer to the hardware structure
3466 * @v_list: list of VLAN and forwarding information
3468 * Function add VLAN rule for logical port from HW struct
3471 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3474 return ICE_ERR_PARAM;
3476 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3480 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3481 * @hw: pointer to the hardware structure
3482 * @mv_list: list of MAC and VLAN filters
3484 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3485 * pruning bits enabled, then it is the responsibility of the caller to make
3486 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3487 * VLAN won't be received on that VSI otherwise.
3490 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3492 struct ice_fltr_list_entry *mv_list_itr;
3493 struct ice_sw_recipe *recp_list;
3495 if (!mv_list || !hw)
3496 return ICE_ERR_PARAM;
3498 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3499 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3501 enum ice_sw_lkup_type l_type =
3502 mv_list_itr->fltr_info.lkup_type;
3504 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3505 return ICE_ERR_PARAM;
3506 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3507 mv_list_itr->status =
3508 ice_add_rule_internal(hw, recp_list,
3509 hw->port_info->lport,
3511 if (mv_list_itr->status)
3512 return mv_list_itr->status;
3518 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3519 * @hw: pointer to the hardware structure
3520 * @em_list: list of ether type MAC filter, MAC is optional
3521 * @sw: pointer to switch info struct for which function add rule
3522 * @lport: logic port number on which function add rule
3524 * This function requires the caller to populate the entries in
3525 * the filter list with the necessary fields (including flags to
3526 * indicate Tx or Rx rules).
3528 static enum ice_status
3529 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3530 struct ice_switch_info *sw, u8 lport)
3532 struct ice_fltr_list_entry *em_list_itr;
3534 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3536 struct ice_sw_recipe *recp_list;
3537 enum ice_sw_lkup_type l_type;
3539 l_type = em_list_itr->fltr_info.lkup_type;
3540 recp_list = &sw->recp_list[l_type];
3542 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3543 l_type != ICE_SW_LKUP_ETHERTYPE)
3544 return ICE_ERR_PARAM;
3546 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3549 if (em_list_itr->status)
3550 return em_list_itr->status;
3557 * ice_add_eth_mac - Add a ethertype based filter rule
3558 * @hw: pointer to the hardware structure
3559 * @em_list: list of ethertype and forwarding information
3561 * Function add ethertype rule for logical port from HW struct
3563 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3565 if (!em_list || !hw)
3566 return ICE_ERR_PARAM;
3568 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3569 hw->port_info->lport);
3573 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3574 * @hw: pointer to the hardware structure
3575 * @em_list: list of ethertype or ethertype MAC entries
3576 * @sw: pointer to switch info struct for which function add rule
3578 static enum ice_status
3579 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3580 struct ice_switch_info *sw)
3582 struct ice_fltr_list_entry *em_list_itr, *tmp;
3584 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3586 struct ice_sw_recipe *recp_list;
3587 enum ice_sw_lkup_type l_type;
3589 l_type = em_list_itr->fltr_info.lkup_type;
3591 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3592 l_type != ICE_SW_LKUP_ETHERTYPE)
3593 return ICE_ERR_PARAM;
3595 recp_list = &sw->recp_list[l_type];
3596 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3598 if (em_list_itr->status)
3599 return em_list_itr->status;
3605 * ice_remove_eth_mac - remove a ethertype based filter rule
3606 * @hw: pointer to the hardware structure
3607 * @em_list: list of ethertype and forwarding information
3611 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3613 if (!em_list || !hw)
3614 return ICE_ERR_PARAM;
3616 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3620 * ice_rem_sw_rule_info
3621 * @hw: pointer to the hardware structure
3622 * @rule_head: pointer to the switch list structure that we want to delete
3625 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3627 if (!LIST_EMPTY(rule_head)) {
3628 struct ice_fltr_mgmt_list_entry *entry;
3629 struct ice_fltr_mgmt_list_entry *tmp;
3631 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3632 ice_fltr_mgmt_list_entry, list_entry) {
3633 LIST_DEL(&entry->list_entry);
3634 ice_free(hw, entry);
3640 * ice_rem_adv_rule_info
3641 * @hw: pointer to the hardware structure
3642 * @rule_head: pointer to the switch list structure that we want to delete
3645 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3647 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3648 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3650 if (LIST_EMPTY(rule_head))
3653 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3654 ice_adv_fltr_mgmt_list_entry, list_entry) {
3655 LIST_DEL(&lst_itr->list_entry);
3656 ice_free(hw, lst_itr->lkups);
3657 ice_free(hw, lst_itr);
3662 * ice_rem_all_sw_rules_info
3663 * @hw: pointer to the hardware structure
3665 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3667 struct ice_switch_info *sw = hw->switch_info;
3670 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3671 struct LIST_HEAD_TYPE *rule_head;
3673 rule_head = &sw->recp_list[i].filt_rules;
3674 if (!sw->recp_list[i].adv_rule)
3675 ice_rem_sw_rule_info(hw, rule_head);
3677 ice_rem_adv_rule_info(hw, rule_head);
3682 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3683 * @pi: pointer to the port_info structure
3684 * @vsi_handle: VSI handle to set as default
3685 * @set: true to add the above mentioned switch rule, false to remove it
3686 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3688 * add filter rule to set/unset given VSI as default VSI for the switch
3689 * (represented by swid)
3692 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3695 struct ice_aqc_sw_rules_elem *s_rule;
3696 struct ice_fltr_info f_info;
3697 struct ice_hw *hw = pi->hw;
3698 enum ice_adminq_opc opcode;
3699 enum ice_status status;
3703 if (!ice_is_vsi_valid(hw, vsi_handle))
3704 return ICE_ERR_PARAM;
3705 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3707 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3708 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3709 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3711 return ICE_ERR_NO_MEMORY;
3713 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3715 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3716 f_info.flag = direction;
3717 f_info.fltr_act = ICE_FWD_TO_VSI;
3718 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3720 if (f_info.flag & ICE_FLTR_RX) {
3721 f_info.src = pi->lport;
3722 f_info.src_id = ICE_SRC_ID_LPORT;
3724 f_info.fltr_rule_id =
3725 pi->dflt_rx_vsi_rule_id;
3726 } else if (f_info.flag & ICE_FLTR_TX) {
3727 f_info.src_id = ICE_SRC_ID_VSI;
3728 f_info.src = hw_vsi_id;
3730 f_info.fltr_rule_id =
3731 pi->dflt_tx_vsi_rule_id;
3735 opcode = ice_aqc_opc_add_sw_rules;
3737 opcode = ice_aqc_opc_remove_sw_rules;
3739 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3741 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3742 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3745 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3747 if (f_info.flag & ICE_FLTR_TX) {
3748 pi->dflt_tx_vsi_num = hw_vsi_id;
3749 pi->dflt_tx_vsi_rule_id = index;
3750 } else if (f_info.flag & ICE_FLTR_RX) {
3751 pi->dflt_rx_vsi_num = hw_vsi_id;
3752 pi->dflt_rx_vsi_rule_id = index;
3755 if (f_info.flag & ICE_FLTR_TX) {
3756 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3757 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3758 } else if (f_info.flag & ICE_FLTR_RX) {
3759 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3760 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3765 ice_free(hw, s_rule);
3770 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3771 * @list_head: head of rule list
3772 * @f_info: rule information
3774 * Helper function to search for a unicast rule entry - this is to be used
3775 * to remove unicast MAC filter that is not shared with other VSIs on the
3778 * Returns pointer to entry storing the rule if found
3780 static struct ice_fltr_mgmt_list_entry *
3781 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3782 struct ice_fltr_info *f_info)
3784 struct ice_fltr_mgmt_list_entry *list_itr;
3786 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3788 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3789 sizeof(f_info->l_data)) &&
3790 f_info->fwd_id.hw_vsi_id ==
3791 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3792 f_info->flag == list_itr->fltr_info.flag)
3799 * ice_remove_mac_rule - remove a MAC based filter rule
3800 * @hw: pointer to the hardware structure
3801 * @m_list: list of MAC addresses and forwarding information
3802 * @recp_list: list from which function remove MAC address
3804 * This function removes either a MAC filter rule or a specific VSI from a
3805 * VSI list for a multicast MAC address.
3807 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3808 * ice_add_mac. Caller should be aware that this call will only work if all
3809 * the entries passed into m_list were added previously. It will not attempt to
3810 * do a partial remove of entries that were found.
3812 static enum ice_status
3813 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3814 struct ice_sw_recipe *recp_list)
3816 struct ice_fltr_list_entry *list_itr, *tmp;
3817 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3820 return ICE_ERR_PARAM;
3822 rule_lock = &recp_list->filt_rule_lock;
3823 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3825 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3826 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3829 if (l_type != ICE_SW_LKUP_MAC)
3830 return ICE_ERR_PARAM;
3832 vsi_handle = list_itr->fltr_info.vsi_handle;
3833 if (!ice_is_vsi_valid(hw, vsi_handle))
3834 return ICE_ERR_PARAM;
3836 list_itr->fltr_info.fwd_id.hw_vsi_id =
3837 ice_get_hw_vsi_num(hw, vsi_handle);
3838 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3839 /* Don't remove the unicast address that belongs to
3840 * another VSI on the switch, since it is not being
3843 ice_acquire_lock(rule_lock);
3844 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3845 &list_itr->fltr_info)) {
3846 ice_release_lock(rule_lock);
3847 return ICE_ERR_DOES_NOT_EXIST;
3849 ice_release_lock(rule_lock);
3851 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3853 if (list_itr->status)
3854 return list_itr->status;
3860 * ice_remove_mac - remove a MAC address based filter rule
3861 * @hw: pointer to the hardware structure
3862 * @m_list: list of MAC addresses and forwarding information
3866 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3868 struct ice_sw_recipe *recp_list;
3870 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3871 return ice_remove_mac_rule(hw, m_list, recp_list);
3875 * ice_remove_vlan_rule - Remove VLAN based filter rule
3876 * @hw: pointer to the hardware structure
3877 * @v_list: list of VLAN entries and forwarding information
3878 * @recp_list: list from which function remove VLAN
3880 static enum ice_status
3881 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3882 struct ice_sw_recipe *recp_list)
3884 struct ice_fltr_list_entry *v_list_itr, *tmp;
3886 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3888 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3890 if (l_type != ICE_SW_LKUP_VLAN)
3891 return ICE_ERR_PARAM;
3892 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3894 if (v_list_itr->status)
3895 return v_list_itr->status;
3901 * ice_remove_vlan - remove a VLAN address based filter rule
3902 * @hw: pointer to the hardware structure
3903 * @v_list: list of VLAN and forwarding information
3907 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3909 struct ice_sw_recipe *recp_list;
3912 return ICE_ERR_PARAM;
3914 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3915 return ice_remove_vlan_rule(hw, v_list, recp_list);
3919 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3920 * @hw: pointer to the hardware structure
3921 * @v_list: list of MAC VLAN entries and forwarding information
3924 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3926 struct ice_fltr_list_entry *v_list_itr, *tmp;
3927 struct ice_sw_recipe *recp_list;
3930 return ICE_ERR_PARAM;
3932 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3933 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3935 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3937 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3938 return ICE_ERR_PARAM;
3939 v_list_itr->status =
3940 ice_remove_rule_internal(hw, recp_list,
3942 if (v_list_itr->status)
3943 return v_list_itr->status;
3949 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3950 * @fm_entry: filter entry to inspect
3951 * @vsi_handle: VSI handle to compare with filter info
3954 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3956 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3957 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3958 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3959 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3964 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3965 * @hw: pointer to the hardware structure
3966 * @vsi_handle: VSI handle to remove filters from
3967 * @vsi_list_head: pointer to the list to add entry to
3968 * @fi: pointer to fltr_info of filter entry to copy & add
3970 * Helper function, used when creating a list of filters to remove from
3971 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3972 * original filter entry, with the exception of fltr_info.fltr_act and
3973 * fltr_info.fwd_id fields. These are set such that later logic can
3974 * extract which VSI to remove the fltr from, and pass on that information.
3976 static enum ice_status
3977 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3978 struct LIST_HEAD_TYPE *vsi_list_head,
3979 struct ice_fltr_info *fi)
3981 struct ice_fltr_list_entry *tmp;
3983 /* this memory is freed up in the caller function
3984 * once filters for this VSI are removed
3986 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3988 return ICE_ERR_NO_MEMORY;
3990 tmp->fltr_info = *fi;
3992 /* Overwrite these fields to indicate which VSI to remove filter from,
3993 * so find and remove logic can extract the information from the
3994 * list entries. Note that original entries will still have proper
3997 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3998 tmp->fltr_info.vsi_handle = vsi_handle;
3999 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4001 LIST_ADD(&tmp->list_entry, vsi_list_head);
4007 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4008 * @hw: pointer to the hardware structure
4009 * @vsi_handle: VSI handle to remove filters from
4010 * @lkup_list_head: pointer to the list that has certain lookup type filters
4011 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4013 * Locates all filters in lkup_list_head that are used by the given VSI,
4014 * and adds COPIES of those entries to vsi_list_head (intended to be used
4015 * to remove the listed filters).
4016 * Note that this means all entries in vsi_list_head must be explicitly
4017 * deallocated by the caller when done with list.
4019 static enum ice_status
4020 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4021 struct LIST_HEAD_TYPE *lkup_list_head,
4022 struct LIST_HEAD_TYPE *vsi_list_head)
4024 struct ice_fltr_mgmt_list_entry *fm_entry;
4025 enum ice_status status = ICE_SUCCESS;
4027 /* check to make sure VSI ID is valid and within boundary */
4028 if (!ice_is_vsi_valid(hw, vsi_handle))
4029 return ICE_ERR_PARAM;
4031 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4032 ice_fltr_mgmt_list_entry, list_entry) {
4033 struct ice_fltr_info *fi;
4035 fi = &fm_entry->fltr_info;
4036 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4039 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4048 * ice_determine_promisc_mask
4049 * @fi: filter info to parse
4051 * Helper function to determine which ICE_PROMISC_ mask corresponds
4052 * to given filter into.
4054 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4056 u16 vid = fi->l_data.mac_vlan.vlan_id;
4057 u8 *macaddr = fi->l_data.mac.mac_addr;
4058 bool is_tx_fltr = false;
4059 u8 promisc_mask = 0;
4061 if (fi->flag == ICE_FLTR_TX)
4064 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4065 promisc_mask |= is_tx_fltr ?
4066 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4067 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4068 promisc_mask |= is_tx_fltr ?
4069 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4070 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4071 promisc_mask |= is_tx_fltr ?
4072 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4074 promisc_mask |= is_tx_fltr ?
4075 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4077 return promisc_mask;
4081 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4082 * @hw: pointer to the hardware structure
4083 * @vsi_handle: VSI handle to retrieve info from
4084 * @promisc_mask: pointer to mask to be filled in
4085 * @vid: VLAN ID of promisc VLAN VSI
4088 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4091 struct ice_switch_info *sw = hw->switch_info;
4092 struct ice_fltr_mgmt_list_entry *itr;
4093 struct LIST_HEAD_TYPE *rule_head;
4094 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4096 if (!ice_is_vsi_valid(hw, vsi_handle))
4097 return ICE_ERR_PARAM;
4101 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4102 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4104 ice_acquire_lock(rule_lock);
4105 LIST_FOR_EACH_ENTRY(itr, rule_head,
4106 ice_fltr_mgmt_list_entry, list_entry) {
4107 /* Continue if this filter doesn't apply to this VSI or the
4108 * VSI ID is not in the VSI map for this filter
4110 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4113 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4115 ice_release_lock(rule_lock);
4121 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4122 * @hw: pointer to the hardware structure
4123 * @vsi_handle: VSI handle to retrieve info from
4124 * @promisc_mask: pointer to mask to be filled in
4125 * @vid: VLAN ID of promisc VLAN VSI
4128 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4131 struct ice_switch_info *sw = hw->switch_info;
4132 struct ice_fltr_mgmt_list_entry *itr;
4133 struct LIST_HEAD_TYPE *rule_head;
4134 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4136 if (!ice_is_vsi_valid(hw, vsi_handle))
4137 return ICE_ERR_PARAM;
4141 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4142 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4144 ice_acquire_lock(rule_lock);
4145 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4147 /* Continue if this filter doesn't apply to this VSI or the
4148 * VSI ID is not in the VSI map for this filter
4150 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4153 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4155 ice_release_lock(rule_lock);
4161 * ice_remove_promisc - Remove promisc based filter rules
4162 * @hw: pointer to the hardware structure
4163 * @recp_id: recipe ID for which the rule needs to removed
4164 * @v_list: list of promisc entries
4166 static enum ice_status
4167 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4168 struct LIST_HEAD_TYPE *v_list)
4170 struct ice_fltr_list_entry *v_list_itr, *tmp;
4171 struct ice_sw_recipe *recp_list;
4173 recp_list = &hw->switch_info->recp_list[recp_id];
4174 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4176 v_list_itr->status =
4177 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4178 if (v_list_itr->status)
4179 return v_list_itr->status;
4185 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4186 * @hw: pointer to the hardware structure
4187 * @vsi_handle: VSI handle to clear mode
4188 * @promisc_mask: mask of promiscuous config bits to clear
4189 * @vid: VLAN ID to clear VLAN promiscuous
4192 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4195 struct ice_switch_info *sw = hw->switch_info;
4196 struct ice_fltr_list_entry *fm_entry, *tmp;
4197 struct LIST_HEAD_TYPE remove_list_head;
4198 struct ice_fltr_mgmt_list_entry *itr;
4199 struct LIST_HEAD_TYPE *rule_head;
4200 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4201 enum ice_status status = ICE_SUCCESS;
4204 if (!ice_is_vsi_valid(hw, vsi_handle))
4205 return ICE_ERR_PARAM;
4207 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4208 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4210 recipe_id = ICE_SW_LKUP_PROMISC;
4212 rule_head = &sw->recp_list[recipe_id].filt_rules;
4213 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4215 INIT_LIST_HEAD(&remove_list_head);
4217 ice_acquire_lock(rule_lock);
4218 LIST_FOR_EACH_ENTRY(itr, rule_head,
4219 ice_fltr_mgmt_list_entry, list_entry) {
4220 struct ice_fltr_info *fltr_info;
4221 u8 fltr_promisc_mask = 0;
4223 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4225 fltr_info = &itr->fltr_info;
4227 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4228 vid != fltr_info->l_data.mac_vlan.vlan_id)
4231 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4233 /* Skip if filter is not completely specified by given mask */
4234 if (fltr_promisc_mask & ~promisc_mask)
4237 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4241 ice_release_lock(rule_lock);
4242 goto free_fltr_list;
4245 ice_release_lock(rule_lock);
4247 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4250 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4251 ice_fltr_list_entry, list_entry) {
4252 LIST_DEL(&fm_entry->list_entry);
4253 ice_free(hw, fm_entry);
4260 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4261 * @hw: pointer to the hardware structure
4262 * @vsi_handle: VSI handle to configure
4263 * @promisc_mask: mask of promiscuous config bits
4264 * @vid: VLAN ID to set VLAN promiscuous
4267 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4269 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4270 struct ice_fltr_list_entry f_list_entry;
4271 struct ice_fltr_info new_fltr;
4272 enum ice_status status = ICE_SUCCESS;
4278 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4280 if (!ice_is_vsi_valid(hw, vsi_handle))
4281 return ICE_ERR_PARAM;
4282 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4284 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4286 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4287 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4288 new_fltr.l_data.mac_vlan.vlan_id = vid;
4289 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4291 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4292 recipe_id = ICE_SW_LKUP_PROMISC;
4295 /* Separate filters must be set for each direction/packet type
4296 * combination, so we will loop over the mask value, store the
4297 * individual type, and clear it out in the input mask as it
4300 while (promisc_mask) {
4301 struct ice_sw_recipe *recp_list;
4307 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4308 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4309 pkt_type = UCAST_FLTR;
4310 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4311 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4312 pkt_type = UCAST_FLTR;
4314 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4315 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4316 pkt_type = MCAST_FLTR;
4317 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4318 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4319 pkt_type = MCAST_FLTR;
4321 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4322 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4323 pkt_type = BCAST_FLTR;
4324 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4325 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4326 pkt_type = BCAST_FLTR;
4330 /* Check for VLAN promiscuous flag */
4331 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4332 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4333 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4334 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4338 /* Set filter DA based on packet type */
4339 mac_addr = new_fltr.l_data.mac.mac_addr;
4340 if (pkt_type == BCAST_FLTR) {
4341 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4342 } else if (pkt_type == MCAST_FLTR ||
4343 pkt_type == UCAST_FLTR) {
4344 /* Use the dummy ether header DA */
4345 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4346 ICE_NONDMA_TO_NONDMA);
4347 if (pkt_type == MCAST_FLTR)
4348 mac_addr[0] |= 0x1; /* Set multicast bit */
4351 /* Need to reset this to zero for all iterations */
4354 new_fltr.flag |= ICE_FLTR_TX;
4355 new_fltr.src = hw_vsi_id;
4357 new_fltr.flag |= ICE_FLTR_RX;
4358 new_fltr.src = hw->port_info->lport;
4361 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4362 new_fltr.vsi_handle = vsi_handle;
4363 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4364 f_list_entry.fltr_info = new_fltr;
4365 recp_list = &hw->switch_info->recp_list[recipe_id];
4367 status = ice_add_rule_internal(hw, recp_list,
4368 hw->port_info->lport,
4370 if (status != ICE_SUCCESS)
4371 goto set_promisc_exit;
4379 * ice_set_vlan_vsi_promisc
4380 * @hw: pointer to the hardware structure
4381 * @vsi_handle: VSI handle to configure
4382 * @promisc_mask: mask of promiscuous config bits
4383 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4385 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4388 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4389 bool rm_vlan_promisc)
4391 struct ice_switch_info *sw = hw->switch_info;
4392 struct ice_fltr_list_entry *list_itr, *tmp;
4393 struct LIST_HEAD_TYPE vsi_list_head;
4394 struct LIST_HEAD_TYPE *vlan_head;
4395 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4396 enum ice_status status;
4399 INIT_LIST_HEAD(&vsi_list_head);
4400 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4401 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4402 ice_acquire_lock(vlan_lock);
4403 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4405 ice_release_lock(vlan_lock);
4407 goto free_fltr_list;
4409 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4411 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4412 if (rm_vlan_promisc)
4413 status = ice_clear_vsi_promisc(hw, vsi_handle,
4414 promisc_mask, vlan_id);
4416 status = ice_set_vsi_promisc(hw, vsi_handle,
4417 promisc_mask, vlan_id);
4423 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4424 ice_fltr_list_entry, list_entry) {
4425 LIST_DEL(&list_itr->list_entry);
4426 ice_free(hw, list_itr);
4432 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4433 * @hw: pointer to the hardware structure
4434 * @vsi_handle: VSI handle to remove filters from
4435 * @recp_list: recipe list from which the function removes the filter
4436 * @lkup: switch rule filter lookup type
4439 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4440 struct ice_sw_recipe *recp_list,
4441 enum ice_sw_lkup_type lkup)
4443 struct ice_fltr_list_entry *fm_entry;
4444 struct LIST_HEAD_TYPE remove_list_head;
4445 struct LIST_HEAD_TYPE *rule_head;
4446 struct ice_fltr_list_entry *tmp;
4447 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4448 enum ice_status status;
4450 INIT_LIST_HEAD(&remove_list_head);
4451 rule_lock = &recp_list[lkup].filt_rule_lock;
4452 rule_head = &recp_list[lkup].filt_rules;
/* Snapshot this VSI's filters of the given lookup type into a private
 * list while holding the rule lock, so the per-type remove helpers
 * below can run without the lock held.
 */
4453 ice_acquire_lock(rule_lock);
4454 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4456 ice_release_lock(rule_lock);
/* Dispatch to the remove routine matching the lookup type */
4461 case ICE_SW_LKUP_MAC:
4462 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4464 case ICE_SW_LKUP_VLAN:
4465 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4467 case ICE_SW_LKUP_PROMISC:
4468 case ICE_SW_LKUP_PROMISC_VLAN:
4469 ice_remove_promisc(hw, lkup, &remove_list_head);
4471 case ICE_SW_LKUP_MAC_VLAN:
4472 ice_remove_mac_vlan(hw, &remove_list_head);
4474 case ICE_SW_LKUP_ETHERTYPE:
4475 case ICE_SW_LKUP_ETHERTYPE_MAC:
4476 ice_remove_eth_mac(hw, &remove_list_head);
4478 case ICE_SW_LKUP_DFLT:
4479 ice_debug(hw, ICE_DBG_SW,
4480 "Remove filters for this lookup type hasn't been implemented yet\n")
4482 case ICE_SW_LKUP_LAST:
4483 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the temporary snapshot entries regardless of remove outcome */
4487 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4488 ice_fltr_list_entry, list_entry) {
4489 LIST_DEL(&fm_entry->list_entry);
4490 ice_free(hw, fm_entry);
4495 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4496 * @hw: pointer to the hardware structure
4497 * @vsi_handle: VSI handle to remove filters from
4498 * @sw: pointer to switch info struct
4501 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4502 struct ice_switch_info *sw)
4504 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Purge this VSI's filters from every supported lookup-type recipe
 * list (MAC, MAC+VLAN, promiscuous, VLAN, default, ethertype, ...).
 */
4506 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4507 sw->recp_list, ICE_SW_LKUP_MAC);
4508 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4509 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4510 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4511 sw->recp_list, ICE_SW_LKUP_PROMISC);
4512 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4513 sw->recp_list, ICE_SW_LKUP_VLAN);
4514 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4515 sw->recp_list, ICE_SW_LKUP_DFLT);
4516 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4517 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4518 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4519 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4520 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4521 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4525 * ice_remove_vsi_fltr - Remove all filters for a VSI
4526 * @hw: pointer to the hardware structure
4527 * @vsi_handle: VSI handle to remove filters from
4529 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
/* Convenience wrapper operating on the device's primary switch_info */
4531 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4535 * ice_alloc_res_cntr - allocating resource counter
4536 * @hw: pointer to the hardware structure
4537 * @type: type of resource
4538 * @alloc_shared: if set it is shared else dedicated
4539 * @num_items: number of entries requested for FD resource type
4540 * @counter_id: counter index returned by AQ call
4543 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4546 struct ice_aqc_alloc_free_res_elem *buf;
4547 enum ice_status status;
4550 /* Allocate resource */
4551 buf_len = sizeof(*buf);
4552 buf = (struct ice_aqc_alloc_free_res_elem *)
4553 ice_malloc(hw, buf_len);
4555 return ICE_ERR_NO_MEMORY;
/* Single-element request: resource type plus shared/dedicated flag */
4557 buf->num_elems = CPU_TO_LE16(num_items);
4558 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4559 ICE_AQC_RES_TYPE_M) | alloc_shared);
4561 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4562 ice_aqc_opc_alloc_res, NULL);
/* On success FW returns the allocated counter index in sw_resp */
4566 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4574 * ice_free_res_cntr - free resource counter
4575 * @hw: pointer to the hardware structure
4576 * @type: type of resource
4577 * @alloc_shared: if set it is shared else dedicated
4578 * @num_items: number of entries to be freed for FD resource type
4579 * @counter_id: counter ID resource which needs to be freed
4582 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4585 struct ice_aqc_alloc_free_res_elem *buf;
4586 enum ice_status status;
4590 buf_len = sizeof(*buf);
4591 buf = (struct ice_aqc_alloc_free_res_elem *)
4592 ice_malloc(hw, buf_len);
4594 return ICE_ERR_NO_MEMORY;
/* Mirror of ice_alloc_res_cntr: same descriptor, but the counter index
 * to release is passed in sw_resp and the free opcode is used.
 */
4596 buf->num_elems = CPU_TO_LE16(num_items);
4597 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4598 ICE_AQC_RES_TYPE_M) | alloc_shared);
4599 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4601 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4602 ice_aqc_opc_free_res, NULL);
4604 ice_debug(hw, ICE_DBG_SW,
4605 "counter resource could not be freed\n");
4612 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4613 * @hw: pointer to the hardware structure
4614 * @counter_id: returns counter index
4616 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
/* Request one dedicated VLAN counter resource from FW */
4618 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4619 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4624 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4625 * @hw: pointer to the hardware structure
4626 * @counter_id: counter index to be freed
4628 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
/* Release a counter previously obtained via ice_alloc_vlan_res_counter */
4630 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4631 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4636 * ice_alloc_res_lg_act - add large action resource
4637 * @hw: pointer to the hardware structure
4638 * @l_id: large action ID to fill it in
4639 * @num_acts: number of actions to hold with a large action entry
4641 static enum ice_status
4642 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4644 struct ice_aqc_alloc_free_res_elem *sw_buf;
4645 enum ice_status status;
/* Reject zero or more actions than a large-action entry can hold */
4648 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4649 return ICE_ERR_PARAM;
4651 /* Allocate resource for large action */
4652 buf_len = sizeof(*sw_buf);
4653 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4654 ice_malloc(hw, buf_len);
4656 return ICE_ERR_NO_MEMORY;
4658 sw_buf->num_elems = CPU_TO_LE16(1);
4660 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4661 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4662 * If num_acts is greater than 2, then use
4663 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4664 * The num_acts cannot exceed 4. This was ensured at the
4665 * beginning of the function.
4668 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4669 else if (num_acts == 2)
4670 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4672 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4674 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4675 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated wide-table (large action) index in sw_resp */
4677 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4679 ice_free(hw, sw_buf);
4684 * ice_add_mac_with_sw_marker - add filter with sw marker
4685 * @hw: pointer to the hardware structure
4686 * @f_info: filter info structure containing the MAC filter information
4687 * @sw_marker: sw marker to tag the Rx descriptor with
4690 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4693 struct ice_fltr_mgmt_list_entry *m_entry;
4694 struct ice_fltr_list_entry fl_info;
4695 struct ice_sw_recipe *recp_list;
4696 struct LIST_HEAD_TYPE l_head;
4697 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4698 enum ice_status ret;
/* Validate: only forward-to-VSI MAC filters with a real marker ID and a
 * valid VSI handle may carry a software marker.
 */
4702 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4703 return ICE_ERR_PARAM;
4705 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4706 return ICE_ERR_PARAM;
4708 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4709 return ICE_ERR_PARAM;
4711 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4712 return ICE_ERR_PARAM;
4713 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4715 /* Add filter if it doesn't exist so then the adding of large
4716 * action always results in update
4719 INIT_LIST_HEAD(&l_head);
4720 fl_info.fltr_info = *f_info;
4721 LIST_ADD(&fl_info.list_entry, &l_head);
4723 entry_exists = false;
4724 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4725 hw->port_info->lport);
4726 if (ret == ICE_ERR_ALREADY_EXISTS)
4727 entry_exists = true;
4731 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4732 rule_lock = &recp_list->filt_rule_lock;
4733 ice_acquire_lock(rule_lock);
4734 /* Get the book keeping entry for the filter */
4735 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4739 /* If counter action was enabled for this rule then don't enable
4740 * sw marker large action
4742 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4743 ret = ICE_ERR_PARAM;
4747 /* if same marker was added before */
4748 if (m_entry->sw_marker_id == sw_marker) {
4749 ret = ICE_ERR_ALREADY_EXISTS;
4753 /* Allocate a hardware table entry to hold large act. Three actions
4754 * for marker based large action
4756 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4760 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4763 /* Update the switch rule to add the marker action */
4764 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4766 ice_release_lock(rule_lock);
/* Error path: drop the lock before any cleanup of the added filter */
4771 ice_release_lock(rule_lock);
4772 /* only remove entry if it did not exist previously */
4774 ret = ice_remove_mac(hw, &l_head);
4780 * ice_add_mac_with_counter - add filter with counter enabled
4781 * @hw: pointer to the hardware structure
4782 * @f_info: pointer to filter info structure containing the MAC filter
4786 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4788 struct ice_fltr_mgmt_list_entry *m_entry;
4789 struct ice_fltr_list_entry fl_info;
4790 struct ice_sw_recipe *recp_list;
4791 struct LIST_HEAD_TYPE l_head;
4792 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4793 enum ice_status ret;
/* Validate: counters are only attached to forward-to-VSI MAC filters
 * on a valid VSI handle.
 */
4798 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4799 return ICE_ERR_PARAM;
4801 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4802 return ICE_ERR_PARAM;
4804 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4805 return ICE_ERR_PARAM;
4806 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4807 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4809 entry_exist = false;
4811 rule_lock = &recp_list->filt_rule_lock;
4813 /* Add filter if it doesn't exist so then the adding of large
4814 * action always results in update
4816 INIT_LIST_HEAD(&l_head);
4818 fl_info.fltr_info = *f_info;
4819 LIST_ADD(&fl_info.list_entry, &l_head);
4821 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4822 hw->port_info->lport);
4823 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Look up the bookkeeping entry for the (now present) filter */
4828 ice_acquire_lock(rule_lock);
4829 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4831 ret = ICE_ERR_BAD_PTR;
4835 /* Don't enable counter for a filter for which sw marker was enabled */
4836 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4837 ret = ICE_ERR_PARAM;
4841 /* If a counter was already enabled then don't need to add again */
4842 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4843 ret = ICE_ERR_ALREADY_EXISTS;
4847 /* Allocate a hardware table entry to VLAN counter */
4848 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4852 /* Allocate a hardware table entry to hold large act. Two actions for
4853 * counter based large action
4855 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4859 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4862 /* Update the switch rule to add the counter action */
4863 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4865 ice_release_lock(rule_lock);
/* Error path: drop the lock before any cleanup of the added filter */
4870 ice_release_lock(rule_lock);
4871 /* only remove entry if it did not exist previously */
4873 ret = ice_remove_mac(hw, &l_head);
4878 /* This is mapping table entry that maps every word within a given protocol
4879 * structure to the real byte offset as per the specification of that
4881 * for example dst address is 3 words in ethertype header and corresponding
4882 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
4883 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4884 * matching entry describing its field. This needs to be updated if new
4885 * structure is added to that union.
4887 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4888 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4889 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4890 { ICE_ETYPE_OL, { 0 } },
4891 { ICE_VLAN_OFOS, { 0, 2 } },
4892 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4893 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4894 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4895 26, 28, 30, 32, 34, 36, 38 } },
4896 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4897 26, 28, 30, 32, 34, 36, 38 } },
4898 { ICE_TCP_IL, { 0, 2 } },
4899 { ICE_UDP_OF, { 0, 2 } },
4900 { ICE_UDP_ILOS, { 0, 2 } },
4901 { ICE_SCTP_IL, { 0, 2 } },
/* Tunnel headers start extracting at byte 8 (past the UDP header) */
4902 { ICE_VXLAN, { 8, 10, 12, 14 } },
4903 { ICE_GENEVE, { 8, 10, 12, 14 } },
4904 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4905 { ICE_NVGRE, { 0, 2, 4, 6 } },
4906 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4907 { ICE_PPPOE, { 0, 2, 4, 6 } },
4910 /* The following table describes preferred grouping of recipes.
4911 * If a recipe that needs to be programmed is a superset or matches one of the
4912 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Note several tunnel types (VXLAN, GENEVE, VXLAN-GPE,
 * GTP) share the UDP-of hardware ID since they ride over outer UDP.
 */
4916 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4917 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4918 { ICE_MAC_IL, ICE_MAC_IL_HW },
4919 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4920 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4921 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4922 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4923 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4924 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4925 { ICE_TCP_IL, ICE_TCP_IL_HW },
4926 { ICE_UDP_OF, ICE_UDP_OF_HW },
4927 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4928 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4929 { ICE_VXLAN, ICE_UDP_OF_HW },
4930 { ICE_GENEVE, ICE_UDP_OF_HW },
4931 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4932 { ICE_NVGRE, ICE_GRE_OF_HW },
4933 { ICE_GTP, ICE_UDP_OF_HW },
4934 { ICE_PPPOE, ICE_PPPOE_HW },
4938 * ice_find_recp - find a recipe
4939 * @hw: pointer to the hardware structure
4940 * @lkup_exts: extension sequence to match
4942 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4944 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4946 bool refresh_required = true;
4947 struct ice_sw_recipe *recp;
4950 /* Walk through existing recipes to find a match */
4951 recp = hw->switch_info->recp_list;
4952 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4953 /* If recipe was not created for this ID, in SW bookkeeping,
4954 * check if FW has an entry for this recipe. If the FW has an
4955 * entry update it in our SW bookkeeping and continue with the
4958 if (!recp[i].recp_created)
4959 if (ice_get_recp_frm_fw(hw,
4960 hw->switch_info->recp_list, i,
4964 /* Skip inverse action recipes */
4965 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4966 ICE_AQ_RECIPE_ACT_INV_ACT)
4969 /* if number of words we are looking for match */
4970 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4971 struct ice_fv_word *a = lkup_exts->fv_words;
4972 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: every requested (prot_id, off)
 * pair must appear somewhere in the candidate recipe's words.
 */
4976 for (p = 0; p < lkup_exts->n_val_words; p++) {
4977 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4979 if (a[p].off == b[q].off &&
4980 a[p].prot_id == b[q].prot_id)
4981 /* Found the "p"th word in the
4986 /* After walking through all the words in the
4987 * "i"th recipe if "p"th word was not found then
4988 * this recipe is not what we are looking for.
4989 * So break out from this loop and try the next
4992 if (q >= recp[i].lkup_exts.n_val_words) {
4997 /* If for "i"th recipe the found was never set to false
4998 * then it means we found our match
5001 return i; /* Return the recipe ID */
/* No recipe matched the requested extraction sequence */
5004 return ICE_MAX_NUM_RECIPES;
5008 * ice_prot_type_to_id - get protocol ID from protocol type
5009 * @type: protocol type
5010 * @id: pointer to variable that will receive the ID
5012 * Returns true if found, false otherwise
5014 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
/* Linear scan of the SW-type to HW-ID mapping table */
5018 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5019 if (ice_prot_id_tbl[i].type == type) {
5020 *id = ice_prot_id_tbl[i].protocol_id;
5027 * ice_fill_valid_words - count valid words
5028 * @rule: advanced rule with lookup information
5029 * @lkup_exts: byte offset extractions of the words that are valid
5031 * calculate valid words in a lookup rule using mask value
5034 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5035 struct ice_prot_lkup_ext *lkup_exts)
5037 u8 j, word, prot_id, ret_val;
5039 if (!ice_prot_type_to_id(rule->type, &prot_id))
5042 word = lkup_exts->n_val_words;
/* Scan the rule's mask union word-by-word; each non-zero 16-bit mask
 * word contributes one (prot_id, byte offset, mask) extraction entry.
 */
5044 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5045 if (((u16 *)&rule->m_u)[j] &&
5046 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5047 /* No more space to accommodate */
5048 if (word >= ICE_MAX_CHAIN_WORDS)
5050 lkup_exts->fv_words[word].off =
5051 ice_prot_ext[rule->type].offs[j];
5052 lkup_exts->fv_words[word].prot_id =
5053 ice_prot_id_tbl[rule->type].protocol_id;
5054 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* ret_val is the number of words added by this rule */
5058 ret_val = word - lkup_exts->n_val_words;
5059 lkup_exts->n_val_words = word;
5065 * ice_create_first_fit_recp_def - Create a recipe grouping
5066 * @hw: pointer to the hardware structure
5067 * @lkup_exts: an array of protocol header extractions
5068 * @rg_list: pointer to a list that stores new recipe groups
5069 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5071 * Using first fit algorithm, take all the words that are still not done
5072 * and start grouping them in 4-word groups. Each group makes up one
5075 static enum ice_status
5076 ice_create_first_fit_recp_def(struct ice_hw *hw,
5077 struct ice_prot_lkup_ext *lkup_exts,
5078 struct LIST_HEAD_TYPE *rg_list,
5081 struct ice_pref_recipe_group *grp = NULL;
5086 /* Walk through every word in the rule to check if it is not done. If so
5087 * then this word needs to be part of a new recipe.
5089 for (j = 0; j < lkup_exts->n_val_words; j++)
5090 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a fresh group when none is open or the current one
 * already holds ICE_NUM_WORDS_RECIPE pairs.
 */
5092 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5093 struct ice_recp_grp_entry *entry;
5095 entry = (struct ice_recp_grp_entry *)
5096 ice_malloc(hw, sizeof(*entry));
5098 return ICE_ERR_NO_MEMORY;
5099 LIST_ADD(&entry->l_entry, rg_list);
5100 grp = &entry->r_group;
/* Append this word's protocol/offset/mask to the open group */
5104 grp->pairs[grp->n_val_pairs].prot_id =
5105 lkup_exts->fv_words[j].prot_id;
5106 grp->pairs[grp->n_val_pairs].off =
5107 lkup_exts->fv_words[j].off;
5108 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5116 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5117 * @hw: pointer to the hardware structure
5118 * @fv_list: field vector with the extraction sequence information
5119 * @rg_list: recipe groupings with protocol-offset pairs
5121 * Helper function to fill in the field vector indices for protocol-offset
5122 * pairs. These indexes are then ultimately programmed into a recipe.
5124 static enum ice_status
5125 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5126 struct LIST_HEAD_TYPE *rg_list)
5128 struct ice_sw_fv_list_entry *fv;
5129 struct ice_recp_grp_entry *rg;
5130 struct ice_fv_word *fv_ext;
5132 if (LIST_EMPTY(fv_list))
/* Only the first field vector's extraction words are consulted */
5135 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5136 fv_ext = fv->fv_ptr->ew;
5138 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5141 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5142 struct ice_fv_word *pr;
5147 pr = &rg->r_group.pairs[i];
5148 mask = rg->r_group.mask[i];
/* Locate the extraction-sequence slot whose protocol and
 * offset match this pair.
 */
5150 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5151 if (fv_ext[j].prot_id == pr->prot_id &&
5152 fv_ext[j].off == pr->off) {
5155 /* Store index of field vector */
5157 /* Mask is given by caller as big
5158 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask for FW consumption */
5161 rg->fv_mask[i] = mask << 8 | mask >> 8;
5165 /* Protocol/offset could not be found, caller gave an
5169 return ICE_ERR_PARAM;
5177 * ice_find_free_recp_res_idx - find free result indexes for recipe
5178 * @hw: pointer to hardware structure
5179 * @profiles: bitmap of profiles that will be associated with the new recipe
5180 * @free_idx: pointer to variable to receive the free index bitmap
5182 * The algorithm used here is:
5183 * 1. When creating a new recipe, create a set P which contains all
5184 * Profiles that will be associated with our new recipe
5186 * 2. For each Profile p in set P:
5187 * a. Add all recipes associated with Profile p into set R
5188 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5189 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5190 * i. Or just assume they all have the same possible indexes:
5192 * i.e., PossibleIndexes = 0x0000F00000000000
5194 * 3. For each Recipe r in set R:
5195 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5196 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5198 * FreeIndexes will contain the bits indicating the indexes free for use,
5199 * then the code needs to update the recipe[r].used_result_idx_bits to
5200 * indicate which indexes were selected for use by this recipe.
5203 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5204 ice_bitmap_t *free_idx)
5206 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5207 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5208 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5212 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5213 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5214 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5215 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Start from "all indexes possible" and narrow down below */
5217 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5218 ice_set_bit(count, possible_idx);
5220 /* For each profile we are going to associate the recipe with, add the
5221 * recipes that are associated with that profile. This will give us
5222 * the set of recipes that our recipe may collide with. Also, determine
5223 * what possible result indexes are usable given this set of profiles.
5226 while (ICE_MAX_NUM_PROFILES >
5227 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5228 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5229 ICE_MAX_NUM_RECIPES);
5230 ice_and_bitmap(possible_idx, possible_idx,
5231 hw->switch_info->prof_res_bm[bit],
5236 /* For each recipe that our new recipe may collide with, determine
5237 * which indexes have been used.
5239 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5240 if (ice_is_bit_set(recipes, bit)) {
5241 ice_or_bitmap(used_idx, used_idx,
5242 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
5246 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5248 /* return number of free indexes */
5251 while (ICE_MAX_FV_WORDS >
5252 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5261 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5262 * @hw: pointer to hardware structure
5263 * @rm: recipe management list entry
5264 * @match_tun: if field vector index for tunnel needs to be programmed
5265 * @profiles: bitmap of profiles that will be associated.
5267 static enum ice_status
5268 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5269 bool match_tun, ice_bitmap_t *profiles)
5271 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5272 struct ice_aqc_recipe_data_elem *tmp;
5273 struct ice_aqc_recipe_data_elem *buf;
5274 struct ice_recp_grp_entry *entry;
5275 enum ice_status status;
5281 /* When more than one recipe are required, another recipe is needed to
5282 * chain them together. Matching a tunnel metadata ID takes up one of
5283 * the match fields in the chaining recipe reducing the number of
5284 * chained recipes by one.
5286 /* check number of free result indices */
5287 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5288 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5290 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5291 free_res_idx, rm->n_grp_count);
5293 if (rm->n_grp_count > 1) {
5294 if (rm->n_grp_count > free_res_idx)
5295 return ICE_ERR_MAX_LIMIT;
/* tmp holds a dump of all existing FW recipes; buf holds the
 * recipes to be programmed, one per recipe group.
 */
5300 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5301 ICE_MAX_NUM_RECIPES,
5304 return ICE_ERR_NO_MEMORY;
5306 buf = (struct ice_aqc_recipe_data_elem *)
5307 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5309 status = ICE_ERR_NO_MEMORY;
5313 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5314 recipe_count = ICE_MAX_NUM_RECIPES;
5315 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5317 if (status || recipe_count == 0)
5320 /* Allocate the recipe resources, and configure them according to the
5321 * match fields from protocol headers and extracted field vectors.
5323 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5324 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5327 status = ice_alloc_recipe(hw, &entry->rid);
5331 /* Clear the result index of the located recipe, as this will be
5332 * updated, if needed, later in the recipe creation process.
5334 tmp[0].content.result_indx = 0;
5336 buf[recps] = tmp[0];
5337 buf[recps].recipe_indx = (u8)entry->rid;
5338 /* if the recipe is a non-root recipe RID should be programmed
5339 * as 0 for the rules to be applied correctly.
5341 buf[recps].content.rid = 0;
5342 ice_memset(&buf[recps].content.lkup_indx, 0,
5343 sizeof(buf[recps].content.lkup_indx),
5346 /* All recipes use look-up index 0 to match switch ID. */
5347 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5348 buf[recps].content.mask[0] =
5349 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5350 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5353 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5354 buf[recps].content.lkup_indx[i] = 0x80;
5355 buf[recps].content.mask[i] = 0;
/* Program this group's field-vector indices and masks into
 * lookup slots 1..n (slot 0 is reserved for switch ID above).
 */
5358 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5359 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5360 buf[recps].content.mask[i + 1] =
5361 CPU_TO_LE16(entry->fv_mask[i]);
5364 if (rm->n_grp_count > 1) {
5365 /* Checks to see if there really is a valid result index
5368 if (chain_idx >= ICE_MAX_FV_WORDS) {
5369 ice_debug(hw, ICE_DBG_SW,
5370 "No chain index available\n");
5371 status = ICE_ERR_MAX_LIMIT;
/* Reserve a result index so the chaining recipe can match
 * on this sub-recipe's outcome; consume it from the pool.
 */
5375 entry->chain_idx = chain_idx;
5376 buf[recps].content.result_indx =
5377 ICE_AQ_RECIPE_RESULT_EN |
5378 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5379 ICE_AQ_RECIPE_RESULT_DATA_M);
5380 ice_clear_bit(chain_idx, result_idx_bm);
5381 chain_idx = ice_find_first_bit(result_idx_bm,
5385 /* fill recipe dependencies */
5386 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5387 ICE_MAX_NUM_RECIPES);
5388 ice_set_bit(buf[recps].recipe_indx,
5389 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5390 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is also the root recipe */
5394 if (rm->n_grp_count == 1) {
5395 rm->root_rid = buf[0].recipe_indx;
5396 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5397 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5398 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5399 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5400 sizeof(buf[0].recipe_bitmap),
5401 ICE_NONDMA_TO_NONDMA);
5403 status = ICE_ERR_BAD_PTR;
5406 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5407 * the recipe which is getting created if specified
5408 * by user. Usually any advanced switch filter, which results
5409 * into new extraction sequence, ended up creating a new recipe
5410 * of type ROOT and usually recipes are associated with profiles
5411 * Switch rule referring newly created recipe, needs to have
5412 * either/or 'fwd' or 'join' priority, otherwise switch rule
5413 * evaluation will not happen correctly. In other words, if
5414 * switch rule to be evaluated on priority basis, then recipe
5415 * needs to have priority, otherwise it will be evaluated last.
5417 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5419 struct ice_recp_grp_entry *last_chain_entry;
5422 /* Allocate the last recipe that will chain the outcomes of the
5423 * other recipes together
5425 status = ice_alloc_recipe(hw, &rid);
5429 buf[recps].recipe_indx = (u8)rid;
5430 buf[recps].content.rid = (u8)rid;
5431 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5432 /* the new entry created should also be part of rg_list to
5433 * make sure we have complete recipe
5435 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5436 sizeof(*last_chain_entry));
5437 if (!last_chain_entry) {
5438 status = ICE_ERR_NO_MEMORY;
5441 last_chain_entry->rid = rid;
5442 ice_memset(&buf[recps].content.lkup_indx, 0,
5443 sizeof(buf[recps].content.lkup_indx),
5445 /* All recipes use look-up index 0 to match switch ID. */
5446 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5447 buf[recps].content.mask[0] =
5448 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5449 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5450 buf[recps].content.lkup_indx[i] =
5451 ICE_AQ_RECIPE_LKUP_IGNORE;
5452 buf[recps].content.mask[i] = 0;
5456 /* update r_bitmap with the recp that is used for chaining */
5457 ice_set_bit(rid, rm->r_bitmap);
5458 /* this is the recipe that chains all the other recipes so it
5459 * should not have a chaining ID to indicate the same
5461 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The chaining recipe matches on every sub-recipe's reserved
 * result index with a full 0xFFFF mask.
 */
5462 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5464 last_chain_entry->fv_idx[i] = entry->chain_idx;
5465 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5466 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5467 ice_set_bit(entry->rid, rm->r_bitmap);
5469 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5470 if (sizeof(buf[recps].recipe_bitmap) >=
5471 sizeof(rm->r_bitmap)) {
5472 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5473 sizeof(buf[recps].recipe_bitmap),
5474 ICE_NONDMA_TO_NONDMA);
5476 status = ICE_ERR_BAD_PTR;
5479 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5481 /* To differentiate among different UDP tunnels, a meta data ID
5485 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5486 buf[recps].content.mask[i] =
5487 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5491 rm->root_rid = (u8)rid;
/* Program all prepared recipes into FW under the change lock */
5493 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5497 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5498 ice_release_change_lock(hw);
5502 /* Every recipe that just got created add it to the recipe
5505 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5506 struct ice_switch_info *sw = hw->switch_info;
5507 bool is_root, idx_found = false;
5508 struct ice_sw_recipe *recp;
5509 u16 idx, buf_idx = 0;
5511 /* find buffer index for copying some data */
5512 for (idx = 0; idx < rm->n_grp_count; idx++)
5513 if (buf[idx].recipe_indx == entry->rid) {
5519 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5523 recp = &sw->recp_list[entry->rid];
5524 is_root = (rm->root_rid == entry->rid);
5525 recp->is_root = is_root;
5527 recp->root_rid = entry->rid;
5528 recp->big_recp = (is_root && rm->n_grp_count > 1);
5530 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5531 entry->r_group.n_val_pairs *
5532 sizeof(struct ice_fv_word),
5533 ICE_NONDMA_TO_NONDMA);
5535 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5536 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5538 /* Copy non-result fv index values and masks to recipe. This
5539 * call will also update the result recipe bitmask.
5541 ice_collect_result_idx(&buf[buf_idx], recp);
5543 /* for non-root recipes, also copy to the root, this allows
5544 * easier matching of a complete chained recipe
5547 ice_collect_result_idx(&buf[buf_idx],
5548 &sw->recp_list[rm->root_rid]);
5550 recp->n_ext_words = entry->r_group.n_val_pairs;
5551 recp->chain_idx = entry->chain_idx;
5552 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5553 recp->n_grp_count = rm->n_grp_count;
5554 recp->tun_type = rm->tun_type;
5555 recp->recp_created = true;
5570 * ice_create_recipe_group - creates recipe group
5571 * @hw: pointer to hardware structure
5572 * @rm: recipe management list entry
5573 * @lkup_exts: lookup elements
5575 static enum ice_status
5576 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5577 struct ice_prot_lkup_ext *lkup_exts)
5579 enum ice_status status;
5582 rm->n_grp_count = 0;
5584 /* Create recipes for words that are marked not done by packing them
5587 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5588 &rm->rg_list, &recp_count);
/* Record the resulting group count and copy the extraction words and
 * masks into the recipe management entry for later programming.
 */
5590 rm->n_grp_count += recp_count;
5591 rm->n_ext_words = lkup_exts->n_val_words;
5592 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5593 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5594 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5595 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5602 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5603 * @hw: pointer to hardware structure
5604 * @lkups: lookup elements or match criteria for the advanced recipe, one
5605 * structure per protocol header
5606 * @lkups_cnt: number of protocols
5607 * @bm: bitmap of field vectors to consider
5608 * @fv_list: pointer to a list that holds the returned field vectors
5610 static enum ice_status
5611 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5612 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5614 enum ice_status status;
5618 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids))
5620 return ICE_ERR_NO_MEMORY;
/* Translate every SW lookup type to its HW protocol ID; an unknown
 * type is a configuration error.
 */
5622 for (i = 0; i < lkups_cnt; i++)
5623 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5624 status = ICE_ERR_CFG;
5628 /* Find field vectors that include all specified protocol types */
5629 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* Free the temporary protocol-ID array on all paths */
5632 ice_free(hw, prot_ids);
/* NOTE(review): elided view — closing braces and the success return are not
 * shown. Comments only — code untouched.
 */
5637 * ice_add_special_words - Add words that are not protocols, such as metadata
5638 * @rinfo: other information regarding the rule e.g. priority and action info
5639 * @lkup_exts: lookup word structure
5641 static enum ice_status
5642 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5643 struct ice_prot_lkup_ext *lkup_exts)
5645 /* If this is a tunneled packet, then add recipe index to match the
5646 * tunnel bit in the packet metadata flags.
5648 if (rinfo->tun_type != ICE_NON_TUN) {
/* Only append when there is still room for one more chain word */
5649 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5650 u8 word = lkup_exts->n_val_words++;
/* The metadata word matches the tunnel flag via the HW metadata
 * protocol ID at the tunnel-flag MDID offset.
 */
5652 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5653 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5655 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No free word slots left for the metadata match */
5657 return ICE_ERR_MAX_LIMIT;
/* NOTE(review): elided view — the case labels' break statements and some case
 * lines are not shown. Comments only — code untouched.
 */
5664 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5665 * @hw: pointer to hardware structure
5666 * @rinfo: other information regarding the rule e.g. priority and action info
5667 * @bm: pointer to memory for returning the bitmap of field vectors
5670 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5673 enum ice_prof_type prof_type;
/* Map the requested tunnel type to a profile class, then fetch the
 * bitmap of field vectors belonging to that class.
 */
5675 switch (rinfo->tun_type) {
5677 prof_type = ICE_PROF_NON_TUN;
5679 case ICE_ALL_TUNNELS:
5680 prof_type = ICE_PROF_TUN_ALL;
/* All UDP-encapsulated tunnel types share the UDP-tunnel profiles */
5682 case ICE_SW_TUN_VXLAN_GPE:
5683 case ICE_SW_TUN_GENEVE:
5684 case ICE_SW_TUN_VXLAN:
5685 case ICE_SW_TUN_UDP:
5686 case ICE_SW_TUN_GTP:
5687 prof_type = ICE_PROF_TUN_UDP;
5689 case ICE_SW_TUN_NVGRE:
5690 prof_type = ICE_PROF_TUN_GRE;
5692 case ICE_SW_TUN_PPPOE:
5693 prof_type = ICE_PROF_TUN_PPPOE;
/* Tunneled-and-plain (and the default) consider every profile */
5695 case ICE_SW_TUN_AND_NON_TUN:
5697 prof_type = ICE_PROF_ALL;
5701 ice_get_sw_fv_bitmap(hw, prof_type, bm);
/* NOTE(review): elided view — error-label definitions, status checks and
 * closing braces are not shown between numbered lines. Comments only —
 * code untouched.
 */
5705 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5706 * @hw: pointer to hardware structure
5707 * @lkups: lookup elements or match criteria for the advanced recipe, one
5708 * structure per protocol header
5709 * @lkups_cnt: number of protocols
5710 * @rinfo: other information regarding the rule e.g. priority and action info
5711 * @rid: return the recipe ID of the recipe created
5713 static enum ice_status
5714 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5715 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5717 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5718 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5719 struct ice_prot_lkup_ext *lkup_exts;
5720 struct ice_recp_grp_entry *r_entry;
5721 struct ice_sw_fv_list_entry *fvit;
5722 struct ice_recp_grp_entry *r_tmp;
5723 struct ice_sw_fv_list_entry *tmp;
5724 enum ice_status status = ICE_SUCCESS;
5725 struct ice_sw_recipe *rm;
5726 bool match_tun = false;
5730 return ICE_ERR_PARAM;
/* Heap-allocate the lookup-extraction scratch structure (freed at the
 * err_free_lkup_exts label).
 */
5732 lkup_exts = (struct ice_prot_lkup_ext *)
5733 ice_malloc(hw, sizeof(*lkup_exts));
5735 return ICE_ERR_NO_MEMORY;
5737 /* Determine the number of words to be matched and if it exceeds a
5738 * recipe's restrictions
5740 for (i = 0; i < lkups_cnt; i++) {
/* Reject unknown protocol types up front */
5743 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5744 status = ICE_ERR_CFG;
5745 goto err_free_lkup_exts;
5748 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5750 status = ICE_ERR_CFG;
5751 goto err_free_lkup_exts;
5755 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5757 status = ICE_ERR_NO_MEMORY;
5758 goto err_free_lkup_exts;
5761 /* Get field vectors that contain fields extracted from all the protocol
5762 * headers being programmed.
5764 INIT_LIST_HEAD(&rm->fv_list);
5765 INIT_LIST_HEAD(&rm->rg_list);
5767 /* Get bitmap of field vectors (profiles) that are compatible with the
5768 * rule request; only these will be searched in the subsequent call to
5771 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5773 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5777 /* Group match words into recipes using preferred recipe grouping
5780 status = ice_create_recipe_group(hw, rm, lkup_exts);
5784 /* There is only profile for UDP tunnels. So, it is necessary to use a
5785 * metadata ID flag to differentiate different tunnel types. A separate
5786 * recipe needs to be used for the metadata.
5788 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5789 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5790 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5793 /* set the recipe priority if specified */
5794 rm->priority = (u8)rinfo->priority;
5796 /* Find offsets from the field vector. Pick the first one for all the
5799 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5803 /* get bitmap of all profiles the recipe will be associated with */
5804 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5805 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5807 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5808 ice_set_bit((u16)fvit->profile_id, profiles);
5811 /* Create any special protocol/offset pairs, such as looking at tunnel
5812 * bits by extracting metadata
5814 status = ice_add_special_words(rinfo, lkup_exts);
5816 goto err_free_lkup_exts;
5818 /* Look for a recipe which matches our requested fv / mask list */
/* ice_find_recp() returns ICE_MAX_NUM_RECIPES when no match exists */
5819 *rid = ice_find_recp(hw, lkup_exts);
5820 if (*rid < ICE_MAX_NUM_RECIPES)
5821 /* Success if found a recipe that match the existing criteria */
5824 /* Recipe we need does not exist, add a recipe */
5825 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5829 /* Associate all the recipes created with all the profiles in the
5830 * common field vector.
5832 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5834 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association bitmap */
5837 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5838 (u8 *)r_bitmap, NULL);
5842 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5843 ICE_MAX_NUM_RECIPES);
/* The map update requires the change lock; released right after */
5844 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5848 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5851 ice_release_change_lock(hw);
5856 /* Update profile to recipe bitmap array */
5857 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5858 ICE_MAX_NUM_RECIPES);
5860 /* Update recipe to profile bitmap array */
5861 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5862 if (ice_is_bit_set(r_bitmap, j))
5863 ice_set_bit((u16)fvit->profile_id,
5864 recipe_to_profile[j]);
/* Publish the created recipe's root ID and cache its lookup words */
5867 *rid = rm->root_rid;
5868 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5869 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: drain the recipe-group and field-vector lists built above */
5871 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5872 ice_recp_grp_entry, l_entry) {
5873 LIST_DEL(&r_entry->l_entry);
5874 ice_free(hw, r_entry);
5877 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5879 LIST_DEL(&fvit->list_entry);
5884 ice_free(hw, rm->root_buf);
5889 ice_free(hw, lkup_exts);
/* NOTE(review): elided view — early returns, `else` branches and closing
 * braces are not shown between numbered lines. Comments only — code
 * untouched.
 */
5895 * ice_find_dummy_packet - find dummy packet by tunnel type
5897 * @lkups: lookup elements or match criteria for the advanced recipe, one
5898 * structure per protocol header
5899 * @lkups_cnt: number of protocols
5900 * @tun_type: tunnel type from the match criteria
5901 * @pkt: dummy packet to fill according to filter match criteria
5902 * @pkt_len: packet length of dummy packet
5903 * @offsets: pointer to receive the pointer to the offsets for the packet
5906 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5907 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5909 const struct ice_dummy_pkt_offsets **offsets)
5911 bool tcp = false, udp = false, ipv6 = false, vlan = false;
/* GTP and PPPoE tunnel types map directly to dedicated templates */
5915 if (tun_type == ICE_SW_TUN_GTP) {
5916 *pkt = dummy_udp_gtp_packet;
5917 *pkt_len = sizeof(dummy_udp_gtp_packet);
5918 *offsets = dummy_udp_gtp_packet_offsets;
5921 if (tun_type == ICE_SW_TUN_PPPOE) {
5922 *pkt = dummy_pppoe_packet;
5923 *pkt_len = sizeof(dummy_pppoe_packet);
5924 *offsets = dummy_pppoe_packet_offsets;
/* Scan the lookups to learn which L3/L4/VLAN flags shape the template */
5927 for (i = 0; i < lkups_cnt; i++) {
5928 if (lkups[i].type == ICE_UDP_ILOS)
5930 else if (lkups[i].type == ICE_TCP_IL)
5932 else if (lkups[i].type == ICE_IPV6_OFOS)
5934 else if (lkups[i].type == ICE_VLAN_OFOS)
/* Outer IPv4 whose protocol field matches GRE (0x2F) with a set
 * mask marks an NVGRE-style rule.
 */
5936 else if (lkups[i].type == ICE_IPV4_OFOS &&
5937 lkups[i].h_u.ipv4_hdr.protocol ==
5938 ICE_IPV4_NVGRE_PROTO_ID &&
5939 lkups[i].m_u.ipv4_hdr.protocol ==
5944 if (tun_type == ICE_ALL_TUNNELS) {
5945 *pkt = dummy_gre_udp_packet;
5946 *pkt_len = sizeof(dummy_gre_udp_packet);
5947 *offsets = dummy_gre_udp_packet_offsets;
/* GRE tunnels: pick a TCP or UDP inner payload template */
5951 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
5953 *pkt = dummy_gre_tcp_packet;
5954 *pkt_len = sizeof(dummy_gre_tcp_packet);
5955 *offsets = dummy_gre_tcp_packet_offsets;
5959 *pkt = dummy_gre_udp_packet;
5960 *pkt_len = sizeof(dummy_gre_udp_packet);
5961 *offsets = dummy_gre_udp_packet_offsets;
/* UDP-encapsulated tunnels (VXLAN/GENEVE/GPE/plain UDP) */
5965 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5966 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5968 *pkt = dummy_udp_tun_tcp_packet;
5969 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5970 *offsets = dummy_udp_tun_tcp_packet_offsets;
5974 *pkt = dummy_udp_tun_udp_packet;
5975 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5976 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunneled: choose by UDP/TCP x IPv4/IPv6 x VLAN flags */
5982 *pkt = dummy_vlan_udp_packet;
5983 *pkt_len = sizeof(dummy_vlan_udp_packet);
5984 *offsets = dummy_vlan_udp_packet_offsets;
5987 *pkt = dummy_udp_packet;
5988 *pkt_len = sizeof(dummy_udp_packet);
5989 *offsets = dummy_udp_packet_offsets;
5991 } else if (udp && ipv6) {
5993 *pkt = dummy_vlan_udp_ipv6_packet;
5994 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5995 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5998 *pkt = dummy_udp_ipv6_packet;
5999 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6000 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): `(tcp && ipv6) || ipv6` reduces to just `ipv6` — the
 * tcp clause is redundant; verify intent before simplifying.
 */
6002 } else if ((tcp && ipv6) || ipv6) {
6004 *pkt = dummy_vlan_tcp_ipv6_packet;
6005 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6006 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6009 *pkt = dummy_tcp_ipv6_packet;
6010 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6011 *offsets = dummy_tcp_ipv6_packet_offsets;
/* Fallback: IPv4 TCP templates (with or without VLAN tag) */
6016 *pkt = dummy_vlan_tcp_packet;
6017 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6018 *offsets = dummy_vlan_tcp_packet_offsets;
6020 *pkt = dummy_tcp_packet;
6021 *pkt_len = sizeof(dummy_tcp_packet);
6022 *offsets = dummy_tcp_packet_offsets;
/* NOTE(review): elided view — several case labels, break statements and the
 * success return are not shown. Comments only — code untouched.
 */
6027 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6029 * @lkups: lookup elements or match criteria for the advanced recipe, one
6030 * structure per protocol header
6031 * @lkups_cnt: number of protocols
6032 * @s_rule: stores rule information from the match criteria
6033 * @dummy_pkt: dummy packet to fill according to filter match criteria
6034 * @pkt_len: packet length of dummy packet
6035 * @offsets: offset info for the dummy packet
6037 static enum ice_status
6038 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6039 struct ice_aqc_sw_rules_elem *s_rule,
6040 const u8 *dummy_pkt, u16 pkt_len,
6041 const struct ice_dummy_pkt_offsets *offsets)
6046 /* Start with a packet with a pre-defined/dummy content. Then, fill
6047 * in the header values to be looked up or matched.
6049 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6051 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6053 for (i = 0; i < lkups_cnt; i++) {
6054 enum ice_protocol_type type;
6055 u16 offset = 0, len = 0, j;
6058 /* find the start of this layer; it should be found since this
6059 * was already checked when search for the dummy packet
6061 type = lkups[i].type;
6062 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6063 if (type == offsets[j].type) {
6064 offset = offsets[j].offset;
6069 /* this should never happen in a correct calling sequence */
6071 return ICE_ERR_PARAM;
/* Select the header length to copy, per protocol type */
6073 switch (lkups[i].type) {
6076 len = sizeof(struct ice_ether_hdr);
6079 len = sizeof(struct ice_ethtype_hdr);
6082 len = sizeof(struct ice_vlan_hdr);
6086 len = sizeof(struct ice_ipv4_hdr);
6090 len = sizeof(struct ice_ipv6_hdr);
6095 len = sizeof(struct ice_l4_hdr);
6098 len = sizeof(struct ice_sctp_hdr);
6101 len = sizeof(struct ice_nvgre);
6106 len = sizeof(struct ice_udp_tnl_hdr);
6110 len = sizeof(struct ice_udp_gtp_hdr);
6113 len = sizeof(struct ice_pppoe_hdr);
6116 return ICE_ERR_PARAM;
6119 /* the length should be a word multiple */
6120 if (len % ICE_BYTES_PER_WORD)
6123 /* We have the offset to the header start, the length, the
6124 * caller's header values and mask. Use this information to
6125 * copy the data into the dummy packet appropriately based on
6126 * the mask. Note that we need to only write the bits as
6127 * indicated by the mask to make sure we don't improperly write
6128 * over any significant packet data.
/* Word-wise merge: keep dummy bits where mask is 0, take caller
 * header bits where mask is 1.
 */
6130 for (j = 0; j < len / sizeof(u16); j++)
6131 if (((u16 *)&lkups[i].m_u)[j])
6132 ((u16 *)(pkt + offset))[j] =
6133 (((u16 *)(pkt + offset))[j] &
6134 ~((u16 *)&lkups[i].m_u)[j]) |
6135 (((u16 *)&lkups[i].h_u)[j] &
6136 ((u16 *)&lkups[i].m_u)[j]);
/* Record the final header length in the switch rule (little-endian) */
6139 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
/* NOTE(review): elided view — case break statements, error return and the
 * success return are not shown. Comments only — code untouched.
 */
6145 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6146 * @hw: pointer to the hardware structure
6147 * @tun_type: tunnel type
6148 * @pkt: dummy packet to fill in
6149 * @offsets: offset info for the dummy packet
6151 static enum ice_status
6152 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6153 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Resolve the open UDP tunnel port for the requested tunnel type */
6158 case ICE_SW_TUN_AND_NON_TUN:
6159 case ICE_SW_TUN_VXLAN_GPE:
6160 case ICE_SW_TUN_VXLAN:
6161 case ICE_SW_TUN_UDP:
6162 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6166 case ICE_SW_TUN_GENEVE:
6167 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6172 /* Nothing needs to be done for this tunnel type */
6176 /* Find the outer UDP protocol header and insert the port number */
6177 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6178 if (offsets[i].type == ICE_UDP_OF) {
6179 struct ice_l4_hdr *hdr;
6182 offset = offsets[i].offset;
/* Write the tunnel port into the UDP destination port,
 * converted to network byte order.
 */
6183 hdr = (struct ice_l4_hdr *)&pkt[offset];
6184 hdr->dst_port = CPU_TO_BE16(open_port);
/* NOTE(review): elided view — loop continue/break lines and the final return
 * are not shown. Comments only — code untouched.
 */
6194 * ice_find_adv_rule_entry - Search a rule entry
6195 * @hw: pointer to the hardware structure
6196 * @lkups: lookup elements or match criteria for the advanced recipe, one
6197 * structure per protocol header
6198 * @lkups_cnt: number of protocols
6199 * @recp_id: recipe ID for which we are finding the rule
6200 * @rinfo: other information regarding the rule e.g. priority and action info
6202 * Helper function to search for a given advance rule entry
6203 * Returns pointer to entry storing the rule if found
6205 static struct ice_adv_fltr_mgmt_list_entry *
6206 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6207 u16 lkups_cnt, u16 recp_id,
6208 struct ice_adv_rule_info *rinfo)
6210 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6211 struct ice_switch_info *sw = hw->switch_info;
/* Walk the filter list for this recipe; a match requires identical
 * lookup count, byte-identical lookups, and matching flag/tun_type.
 */
6214 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6215 ice_adv_fltr_mgmt_list_entry, list_entry) {
6216 bool lkups_matched = true;
6218 if (lkups_cnt != list_itr->lkups_cnt)
6220 for (i = 0; i < list_itr->lkups_cnt; i++)
6221 if (memcmp(&list_itr->lkups[i], &lkups[i],
6223 lkups_matched = false;
6226 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6227 rinfo->tun_type == list_itr->rule_info.tun_type &&
/* NOTE(review): elided view — several status checks, braces and the final
 * return are not shown. Comments only — code untouched.
 */
6235 * ice_adv_add_update_vsi_list
6236 * @hw: pointer to the hardware structure
6237 * @m_entry: pointer to current adv filter management list entry
6238 * @cur_fltr: filter information from the book keeping entry
6239 * @new_fltr: filter information with the new VSI to be added
6241 * Call AQ command to add or update previously created VSI list with new VSI.
6243 * Helper function to do book keeping associated with adding filter information
6244 * The algorithm to do the booking keeping is described below :
6245 * When a VSI needs to subscribe to a given advanced filter
6246 * if only one VSI has been added till now
6247 * Allocate a new VSI list and add two VSIs
6248 * to this list using switch rule command
6249 * Update the previously created switch rule with the
6250 * newly created VSI list ID
6251 * if a VSI list was previously created
6252 * Add the new VSI to the previously created VSI list set
6253 * using the update switch rule command
6255 static enum ice_status
6256 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6257 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6258 struct ice_adv_rule_info *cur_fltr,
6259 struct ice_adv_rule_info *new_fltr)
6261 enum ice_status status;
6262 u16 vsi_list_id = 0;
/* Queue/queue-group/drop actions cannot be folded into a VSI list */
6264 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6265 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6266 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6267 return ICE_ERR_NOT_IMPL;
6269 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6270 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6271 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6272 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6273 return ICE_ERR_NOT_IMPL;
6275 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6276 /* Only one entry existed in the mapping and it was not already
6277 * a part of a VSI list. So, create a VSI list with the old and
6280 struct ice_fltr_info tmp_fltr;
6281 u16 vsi_handle_arr[2];
6283 /* A rule already exists with the new VSI being added */
6284 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6285 new_fltr->sw_act.fwd_id.hw_vsi_id)
6286 return ICE_ERR_ALREADY_EXISTS;
/* Create a two-entry VSI list from the existing and new VSIs */
6288 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6289 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6290 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6296 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6297 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6298 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6299 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6300 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6302 /* Update the previous switch rule of "forward to VSI" to
6305 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the new list: action becomes FWD_TO_VSI_LIST and a
 * handle->list map entry is created.
 */
6309 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6310 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6311 m_entry->vsi_list_info =
6312 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6315 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6317 if (!m_entry->vsi_list_info)
6320 /* A rule already exists with the new VSI being added */
6321 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6324 /* Update the previously created VSI list set with
6325 * the new VSI ID passed in
6327 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6329 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6331 ice_aqc_opc_update_sw_rules,
6333 /* update VSI list mapping info with new VSI ID */
6335 ice_set_bit(vsi_handle,
6336 m_entry->vsi_list_info->vsi_map);
6339 m_entry->vsi_count++;
/* NOTE(review): elided view — many status checks, labels, braces and case
 * breaks are not shown between numbered lines. Comments only — code
 * untouched.
 */
6344 * ice_add_adv_rule - helper function to create an advanced switch rule
6345 * @hw: pointer to the hardware structure
6346 * @lkups: information on the words that needs to be looked up. All words
6347 * together makes one recipe
6348 * @lkups_cnt: num of entries in the lkups array
6349 * @rinfo: other information related to the rule that needs to be programmed
6350 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6351 * ignored is case of error.
6353 * This function can program only 1 rule at a time. The lkups is used to
6354 * describe the all the words that forms the "lookup" portion of the recipe.
6355 * These words can span multiple protocols. Callers to this function need to
6356 * pass in a list of protocol headers with lookup information along and mask
6357 * that determines which words are valid from the given protocol header.
6358 * rinfo describes other information related to this rule such as forwarding
6359 * IDs, priority of this rule, etc.
6362 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6363 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6364 struct ice_rule_query_data *added_entry)
6366 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6367 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6368 const struct ice_dummy_pkt_offsets *pkt_offsets;
6369 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6370 struct LIST_HEAD_TYPE *rule_head;
6371 struct ice_switch_info *sw;
6372 enum ice_status status;
6373 const u8 *pkt = NULL;
6378 /* Initialize profile to result index bitmap */
/* One-time lazy init guarded by prof_res_bm_init flag */
6379 if (!hw->switch_info->prof_res_bm_init) {
6380 hw->switch_info->prof_res_bm_init = 1;
6381 ice_init_prof_result_bm(hw);
6385 return ICE_ERR_PARAM;
6387 /* get # of words we need to match */
/* Count non-zero 16-bit mask words across all lookups; the rule must
 * match at least one word and no more than ICE_MAX_CHAIN_WORDS.
 */
6389 for (i = 0; i < lkups_cnt; i++) {
6392 ptr = (u16 *)&lkups[i].m_u;
6393 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6397 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6398 return ICE_ERR_PARAM;
6400 /* make sure that we can locate a dummy packet */
6401 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6404 status = ICE_ERR_PARAM;
6405 goto err_ice_add_adv_rule;
/* Only VSI / queue / queue-group / drop actions are supported here */
6408 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6409 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6410 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6411 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6414 vsi_handle = rinfo->sw_act.vsi_handle;
6415 if (!ice_is_vsi_valid(hw, vsi_handle))
6416 return ICE_ERR_PARAM;
6418 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6419 rinfo->sw_act.fwd_id.hw_vsi_id =
6420 ice_get_hw_vsi_num(hw, vsi_handle);
/* TX rules use the source HW VSI number as the rule source */
6421 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6422 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6424 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6427 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6429 /* we have to add VSI to VSI_LIST and increment vsi_count.
6430 * Also Update VSI list so that we can change forwarding rule
6431 * if the rule already exists, we will check if it exists with
6432 * same vsi_id, if not then add it to the VSI list if it already
6433 * exists if not then create a VSI list and add the existing VSI
6434 * ID and the new VSI ID to the list
6435 * We will add that VSI to the list
6437 status = ice_adv_add_update_vsi_list(hw, m_entry,
6438 &m_entry->rule_info,
/* Existing-rule path: report the IDs from the matched entry */
6441 added_entry->rid = rid;
6442 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6443 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New-rule path: build an AQ switch-rule buffer sized for the dummy
 * packet plus the headerless rule element.
 */
6447 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6448 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6450 return ICE_ERR_NO_MEMORY;
6451 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Encode the requested action into the single-action word */
6452 switch (rinfo->sw_act.fltr_act) {
6453 case ICE_FWD_TO_VSI:
6454 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6455 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6456 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6459 act |= ICE_SINGLE_ACT_TO_Q;
6460 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6461 ICE_SINGLE_ACT_Q_INDEX_M;
6463 case ICE_FWD_TO_QGRP:
/* Queue region size is encoded as log2 of the group size */
6464 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6465 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6466 act |= ICE_SINGLE_ACT_TO_Q;
6467 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6468 ICE_SINGLE_ACT_Q_INDEX_M;
6469 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6470 ICE_SINGLE_ACT_Q_REGION_M;
6472 case ICE_DROP_PACKET:
6473 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6474 ICE_SINGLE_ACT_VALID_BIT;
6477 status = ICE_ERR_CFG;
6478 goto err_ice_add_adv_rule;
6481 /* set the rule LOOKUP type based on caller specified 'RX'
6482 * instead of hardcoding it to be either LOOKUP_TX/RX
6484 * for 'RX' set the source to be the port number
6485 * for 'TX' set the source to be the source HW VSI number (determined
6489 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6490 s_rule->pdata.lkup_tx_rx.src =
6491 CPU_TO_LE16(hw->port_info->lport);
6493 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6494 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6497 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6498 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Fill the dummy packet header with the caller's match values */
6500 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6501 pkt_len, pkt_offsets);
6503 goto err_ice_add_adv_rule;
/* For real tunnels, also patch in the open tunnel UDP port */
6505 if (rinfo->tun_type != ICE_NON_TUN &&
6506 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6507 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6508 s_rule->pdata.lkup_tx_rx.hdr,
6511 goto err_ice_add_adv_rule;
/* Program the rule into HW via the admin queue */
6514 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6515 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6518 goto err_ice_add_adv_rule;
6519 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6520 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6522 status = ICE_ERR_NO_MEMORY;
6523 goto err_ice_add_adv_rule;
/* Keep a private copy of the lookups for later match/removal */
6526 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6527 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6528 ICE_NONDMA_TO_NONDMA);
6529 if (!adv_fltr->lkups) {
6530 status = ICE_ERR_NO_MEMORY;
6531 goto err_ice_add_adv_rule;
6534 adv_fltr->lkups_cnt = lkups_cnt;
6535 adv_fltr->rule_info = *rinfo;
6536 adv_fltr->rule_info.fltr_rule_id =
6537 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6538 sw = hw->switch_info;
6539 sw->recp_list[rid].adv_rule = true;
6540 rule_head = &sw->recp_list[rid].filt_rules;
6542 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6543 struct ice_fltr_info tmp_fltr;
6545 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6546 tmp_fltr.fltr_rule_id =
6547 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6548 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6549 tmp_fltr.fwd_id.hw_vsi_id =
6550 ice_get_hw_vsi_num(hw, vsi_handle);
6551 tmp_fltr.vsi_handle = vsi_handle;
6552 /* Update the previous switch rule of "forward to VSI" to
6555 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6557 goto err_ice_add_adv_rule;
6558 adv_fltr->vsi_count = 1;
6561 /* Add rule entry to book keeping list */
6562 LIST_ADD(&adv_fltr->list_entry, rule_head);
6564 added_entry->rid = rid;
6565 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6566 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error path: undo the partially built filter entry; the rule buffer
 * is freed on all exits below.
 */
6568 err_ice_add_adv_rule:
6569 if (status && adv_fltr) {
6570 ice_free(hw, adv_fltr->lkups);
6571 ice_free(hw, adv_fltr);
6574 ice_free(hw, s_rule);
/* NOTE(review): elided view — some status checks, braces and the final return
 * are not shown. Comments only — code untouched.
 */
6580 * ice_adv_rem_update_vsi_list
6581 * @hw: pointer to the hardware structure
6582 * @vsi_handle: VSI handle of the VSI to remove
6583 * @fm_list: filter management entry for which the VSI list management needs to
6586 static enum ice_status
6587 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6588 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6590 struct ice_vsi_list_map_info *vsi_list_info;
6591 enum ice_sw_lkup_type lkup_type;
6592 enum ice_status status;
/* Only valid for rules currently forwarding to a non-empty VSI list */
6595 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6596 fm_list->vsi_count == 0)
6597 return ICE_ERR_PARAM;
6599 /* A rule with the VSI being removed does not exist */
6600 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6601 return ICE_ERR_DOES_NOT_EXIST;
6603 lkup_type = ICE_SW_LKUP_LAST;
6604 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list (remove flag = true) */
6605 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6606 ice_aqc_opc_update_sw_rules,
6611 fm_list->vsi_count--;
6612 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6613 vsi_list_info = fm_list->vsi_list_info;
/* If only one VSI remains, collapse the list back to a direct
 * forward-to-VSI rule and delete the list.
 */
6614 if (fm_list->vsi_count == 1) {
6615 struct ice_fltr_info tmp_fltr;
6618 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6620 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6621 return ICE_ERR_OUT_OF_RANGE;
6623 /* Make sure VSI list is empty before removing it below */
6624 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6626 ice_aqc_opc_update_sw_rules,
6631 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6632 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6633 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6634 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6635 tmp_fltr.fwd_id.hw_vsi_id =
6636 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6637 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6638 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6640 /* Update the previous switch rule of "MAC forward to VSI" to
6641 * "MAC fwd to VSI list"
6643 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6645 ice_debug(hw, ICE_DBG_SW,
6646 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6647 tmp_fltr.fwd_id.hw_vsi_id, status);
6651 /* Remove the VSI list since it is no longer used */
6652 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6654 ice_debug(hw, ICE_DBG_SW,
6655 "Failed to remove VSI list %d, error %d\n",
6656 vsi_list_id, status);
/* Drop the list map entry from book-keeping and free it */
6660 LIST_DEL(&vsi_list_info->list_entry);
6661 ice_free(hw, vsi_list_info);
6662 fm_list->vsi_list_info = NULL;
/* NOTE(review): elided view — braces, some early returns and the final return
 * are not shown. Comments only — code untouched.
 */
6669 * ice_rem_adv_rule - removes existing advanced switch rule
6670 * @hw: pointer to the hardware structure
6671 * @lkups: information on the words that needs to be looked up. All words
6672 * together makes one recipe
6673 * @lkups_cnt: num of entries in the lkups array
6674 * @rinfo: Its the pointer to the rule information for the rule
6676 * This function can be used to remove 1 rule at a time. The lkups is
6677 * used to describe all the words that forms the "lookup" portion of the
6678 * rule. These words can span multiple protocols. Callers to this function
6679 * need to pass in a list of protocol headers with lookup information along
6680 * and mask that determines which words are valid from the given protocol
6681 * header. rinfo describes other information related to this rule such as
6682 * forwarding IDs, priority of this rule, etc.
6685 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6686 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6688 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6689 struct ice_prot_lkup_ext lkup_exts;
6690 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6691 enum ice_status status = ICE_SUCCESS;
6692 bool remove_rule = false;
6693 u16 i, rid, vsi_handle;
/* Rebuild the lookup-extraction words from the caller's lookups so the
 * owning recipe can be located the same way it was at add time.
 */
6695 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6696 for (i = 0; i < lkups_cnt; i++) {
6699 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6702 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6707 /* Create any special protocol/offset pairs, such as looking at tunnel
6708 * bits by extracting metadata
6710 status = ice_add_special_words(rinfo, &lkup_exts);
6714 rid = ice_find_recp(hw, &lkup_exts);
6715 /* If did not find a recipe that match the existing criteria */
6716 if (rid == ICE_MAX_NUM_RECIPES)
6717 return ICE_ERR_PARAM;
6719 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6720 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6721 /* the rule is already removed */
6724 ice_acquire_lock(rule_lock);
6725 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
/* NOTE(review): ref_cnt is decremented only on the vsi_count > 1 path;
 * confirm the single-VSI path keeps the list map's ref_cnt consistent.
 */
6727 } else if (list_elem->vsi_count > 1) {
6728 list_elem->vsi_list_info->ref_cnt--;
6729 remove_rule = false;
6730 vsi_handle = rinfo->sw_act.vsi_handle;
6731 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6733 vsi_handle = rinfo->sw_act.vsi_handle;
6734 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6736 ice_release_lock(rule_lock);
6739 if (list_elem->vsi_count == 0)
6742 ice_release_lock(rule_lock);
/* Remove the HW rule: a headerless rule element referencing the stored
 * rule index is sent with the remove opcode.
 */
6744 struct ice_aqc_sw_rules_elem *s_rule;
6747 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6749 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6752 return ICE_ERR_NO_MEMORY;
6753 s_rule->pdata.lkup_tx_rx.act = 0;
6754 s_rule->pdata.lkup_tx_rx.index =
6755 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6756 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6757 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6759 ice_aqc_opc_remove_sw_rules, NULL);
/* Only unlink and free book-keeping after HW removal succeeded */
6760 if (status == ICE_SUCCESS) {
6761 ice_acquire_lock(rule_lock);
6762 LIST_DEL(&list_elem->list_entry);
6763 ice_free(hw, list_elem->lkups);
6764 ice_free(hw, list_elem);
6765 ice_release_lock(rule_lock);
6767 ice_free(hw, s_rule);
6773 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6774 * @hw: pointer to the hardware structure
6775 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6777 * This function is used to remove 1 rule at a time. The removal is based on
6778 * the remove_entry parameter. This function will remove a rule for a given
6779 * vsi_handle and rule_id, both of which are passed in via remove_entry
6782 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6783 struct ice_rule_query_data *remove_entry)
6785 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6786 struct LIST_HEAD_TYPE *list_head;
6787 struct ice_adv_rule_info rinfo;
6788 struct ice_switch_info *sw;
6790 sw = hw->switch_info;
/* The recipe must exist before its filter list can be searched */
6791 if (!sw->recp_list[remove_entry->rid].recp_created)
6792 return ICE_ERR_PARAM;
6793 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6794 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6796 if (list_itr->rule_info.fltr_rule_id ==
6797 remove_entry->rule_id) {
/* Remove on behalf of the caller's VSI handle rather than the
 * VSI the rule was originally added with.
 */
6798 rinfo = list_itr->rule_info;
6799 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6800 return ice_rem_adv_rule(hw, list_itr->lkups,
6801 list_itr->lkups_cnt, &rinfo);
/* Rule ID was not found in this recipe's filter list */
6804 return ICE_ERR_PARAM;
6808 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6810 * @hw: pointer to the hardware structure
6811 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6813 * This function is used to remove all the rules for a given VSI and as soon
6814 * as removing a rule fails, it will return immediately with the error code,
6815 * else it will return ICE_SUCCESS
6818 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6820 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6821 struct ice_vsi_list_map_info *map_info;
6822 struct LIST_HEAD_TYPE *list_head;
6823 struct ice_adv_rule_info rinfo;
6824 struct ice_switch_info *sw;
6825 enum ice_status status;
6826 u16 vsi_list_id = 0;
6829 sw = hw->switch_info;
/* Walk every recipe; only recipes that have been created and that
 * carry advanced rules are of interest here.
 */
6830 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6831 if (!sw->recp_list[rid].recp_created)
6833 if (!sw->recp_list[rid].adv_rule)
6835 list_head = &sw->recp_list[rid].filt_rules;
6837 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6838 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Locate the VSI-list entry for this recipe/VSI pair */
6839 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
/* Remove the rule for this VSI; per the function's contract the
 * first failure status is returned to the caller.
 */
6844 rinfo = list_itr->rule_info;
6845 rinfo.sw_act.vsi_handle = vsi_handle;
6846 status = ice_rem_adv_rule(hw, list_itr->lkups,
6847 list_itr->lkups_cnt, &rinfo);
6857 * ice_replay_fltr - Replay all the filters stored by a specific list head
6858 * @hw: pointer to the hardware structure
6859 * @list_head: list for which filters needs to be replayed
6860 * @recp_id: Recipe ID for which rules need to be replayed
6862 static enum ice_status
6863 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6865 struct ice_fltr_mgmt_list_entry *itr;
6866 enum ice_status status = ICE_SUCCESS;
6867 struct ice_sw_recipe *recp_list;
6868 u8 lport = hw->port_info->lport;
6869 struct LIST_HEAD_TYPE l_head;
/* Nothing to replay for an empty list */
6871 if (LIST_EMPTY(list_head))
6874 recp_list = &hw->switch_info->recp_list[recp_id];
6875 /* Move entries from the given list_head to a temporary l_head so that
6876 * they can be replayed. Otherwise when trying to re-add the same
6877 * filter, the function will return already exists
6879 LIST_REPLACE_INIT(list_head, &l_head);
6881 /* Mark the given list_head empty by reinitializing it so filters
6882 * could be added again by *handler
6884 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6886 struct ice_fltr_list_entry f_entry;
6888 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters can be re-added in one shot */
6889 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6890 status = ice_add_rule_internal(hw, recp_list, lport,
6892 if (status != ICE_SUCCESS)
6897 /* Add a filter per VSI separately */
6902 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6904 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Consume this VSI from the map so the per-VSI walk advances */
6907 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6908 f_entry.fltr_info.vsi_handle = vsi_handle;
6909 f_entry.fltr_info.fwd_id.hw_vsi_id =
6910 ice_get_hw_vsi_num(hw, vsi_handle);
6911 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters are re-added through the dedicated VLAN path */
6912 if (recp_id == ICE_SW_LKUP_VLAN)
6913 status = ice_add_vlan_internal(hw, recp_list,
6916 status = ice_add_rule_internal(hw, recp_list,
6919 if (status != ICE_SUCCESS)
6924 /* Clear the filter management list */
6925 ice_rem_sw_rule_info(hw, &l_head);
6930 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6931 * @hw: pointer to the hardware structure
6933 * NOTE: This function does not clean up partially added filters on error.
6934 * It is up to caller of the function to issue a reset or fail early.
6936 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6938 struct ice_switch_info *sw = hw->switch_info;
6939 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's stored filter rules; stop on the first
 * failure (see NOTE above: no cleanup of partially added filters).
 */
6942 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6943 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6945 status = ice_replay_fltr(hw, i, head);
6946 if (status != ICE_SUCCESS)
6953 * ice_replay_vsi_fltr - Replay filters for requested VSI
6954 * @hw: pointer to the hardware structure
6955 * @vsi_handle: driver VSI handle
6956 * @recp_id: Recipe ID for which rules need to be replayed
6957 * @list_head: list for which filters need to be replayed
6959 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6960 * It is required to pass valid VSI handle.
6962 static enum ice_status
6963 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6964 struct LIST_HEAD_TYPE *list_head)
6966 struct ice_fltr_mgmt_list_entry *itr;
6967 enum ice_status status = ICE_SUCCESS;
6968 struct ice_sw_recipe *recp_list;
/* Nothing to replay for an empty list */
6971 if (LIST_EMPTY(list_head))
6973 recp_list = &hw->switch_info->recp_list[recp_id];
6974 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6976 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6978 struct ice_fltr_list_entry f_entry;
6980 f_entry.fltr_info = itr->fltr_info;
/* Filters owned directly by this VSI (not via a VSI list) are
 * re-added as-is.
 */
6981 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6982 itr->fltr_info.vsi_handle == vsi_handle) {
6983 /* update the src in case it is VSI num */
6984 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6985 f_entry.fltr_info.src = hw_vsi_id;
6986 status = ice_add_rule_internal(hw, recp_list,
6987 hw->port_info->lport,
6989 if (status != ICE_SUCCESS)
/* Otherwise replay only if this VSI is a member of the rule's
 * VSI list.
 */
6993 if (!itr->vsi_list_info ||
6994 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6996 /* Clearing it so that the logic can add it back */
6997 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6998 f_entry.fltr_info.vsi_handle = vsi_handle;
6999 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7000 /* update the src in case it is VSI num */
7001 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7002 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters are re-added through the dedicated VLAN path */
7003 if (recp_id == ICE_SW_LKUP_VLAN)
7004 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7006 status = ice_add_rule_internal(hw, recp_list,
7007 hw->port_info->lport,
7009 if (status != ICE_SUCCESS)
7017 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7018 * @hw: pointer to the hardware structure
7019 * @vsi_handle: driver VSI handle
7020 * @list_head: list for which filters need to be replayed
7022 * Replay the advanced rule for the given VSI.
7024 static enum ice_status
7025 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7026 struct LIST_HEAD_TYPE *list_head)
7028 struct ice_rule_query_data added_entry = { 0 };
7029 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7030 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list */
7032 if (LIST_EMPTY(list_head))
7034 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7036 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7037 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules whose switch action targets the requested VSI are
 * replayed; others are left untouched.
 */
7039 if (vsi_handle != rinfo->sw_act.vsi_handle)
7041 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7050 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7051 * @hw: pointer to the hardware structure
7052 * @vsi_handle: driver VSI handle
7054 * Replays filters for requested VSI via vsi_handle.
7056 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7058 struct ice_switch_info *sw = hw->switch_info;
7059 enum ice_status status;
7062 /* Update the recipes that were created */
7063 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7064 struct LIST_HEAD_TYPE *head;
7066 head = &sw->recp_list[i].filt_replay_rules;
/* Recipes flagged adv_rule hold advanced rules and use the
 * advanced-rule replay path; all others use the regular path.
 */
7067 if (!sw->recp_list[i].adv_rule)
7068 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7070 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7071 if (status != ICE_SUCCESS)
7079 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7080 * @hw: pointer to the HW struct
7082 * Deletes the filter replay rules.
7084 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7086 struct ice_switch_info *sw = hw->switch_info;
7092 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7093 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7094 struct LIST_HEAD_TYPE *l_head;
7096 l_head = &sw->recp_list[i].filt_replay_rules;
7097 if (!sw->recp_list[i].adv_rule)
7098 ice_rem_sw_rule_info(hw, l_head);
7100 ice_rem_adv_rule_info(hw, l_head);