1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets within the dummy Ethernet header used to build switch rules */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
/* Largest valid 12-bit VLAN ID */
#define ICE_MAX_VLAN_ID 0xFFF
/* IPv4 protocol number 0x2F (GRE), used by the NVGRE dummy packets */
#define ICE_IPV4_NVGRE_PROTO_ID 0x002F
/* PPP protocol ID that carries IPv6 over PPPoE */
#define ICE_PPP_IPV6_PROTO_ID 0x0057
/* Ethertype for IPv6 */
#define ICE_IPV6_ETHER_ID 0x86DD
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
/* 16 = 6 (DA) + 6 (SA) + 4 (ethertype or VLAN-tag placeholder) */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size of an RX/TX lookup rule element that carries the 16-byte dummy
 * Ethernet header as its packet data; the trailing -1 accounts for the
 * placeholder byte already counted inside the struct (presumably a 1-byte
 * flexible hdr member — confirm against ice_sw_rule_lkup_rx_tx).
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Size of an RX/TX lookup rule element carrying no packet header data */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding (n) action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding (n) VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps a protocol header type to its byte offset within a dummy packet */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* offsets into dummy_gre_tcp_packet (NVGRE-encapsulated inner TCP) */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/IPv4/NVGRE/inner MAC/IPv4/TCP rule template */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offsets into dummy_gre_udp_packet (NVGRE-encapsulated inner UDP) */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 76 },
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/IPv4/NVGRE/inner MAC/IPv4/UDP rule template */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
/* offsets into dummy_udp_tun_tcp_packet (UDP tunnel/VXLAN, inner TCP) */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/IPv4/UDP/VXLAN/inner MAC/IPv4/TCP rule template */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
/* offsets into dummy_udp_tun_udp_packet (UDP tunnel/VXLAN, inner UDP) */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_VXLAN_GPE, 42 },
	{ ICE_UDP_ILOS, 84 },
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/IPv4/UDP/VXLAN/inner MAC/IPv4/UDP rule template */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_UDP_ILOS, 34 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_UDP_ILOS, 38 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (801.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV4_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (801.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offsets into dummy_tcp_ipv6_packet (MAC + IPv6 + TCP) */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offsets into dummy_udp_ipv6_packet (MAC + IPv6 + UDP) */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_IPV6_OFOS, 14 },
	{ ICE_UDP_ILOS, 54 },
	{ ICE_PROTOCOL_LAST, 0 },

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14 },
	{ ICE_IPV6_OFOS, 18 },
	{ ICE_UDP_ILOS, 58 },
	{ ICE_PROTOCOL_LAST, 0 },

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
/* offsets into dummy_udp_gtp_packet (MAC + IPv4 + UDP + GTP) */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_IPV4_OFOS, 14 },
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/IPv4/UDP/GTP packet with a PDU session extension header */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,
/* offsets shared by the PPPoE dummy packets below */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_ETYPE_OL, 12 },
	{ ICE_VLAN_OFOS, 14},
	{ ICE_PROTOCOL_LAST, 0 },

/* dummy MAC/VLAN/PPPoE packet carrying IPv4 (PPP protocol 0x0021) */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x21,		/* PPP Link Layer 24 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* dummy MAC/VLAN/PPPoE packet carrying IPv6 (PPP protocol 0x0057,
 * matching ICE_PPP_IPV6_PROTO_ID)
 */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 (0x8100 = C-tag) */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 bytes alignment */
/* this is a recipe to profile association bitmap */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration: defined later in this file, used by
 * ice_get_recp_frm_fw() to refresh the cached maps above
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the recipe content has its result-index-enable flag set, record the
 * masked result index in the recipe's result index bitmap.
 */
static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
				   struct ice_sw_recipe *recp)
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		/* strip the enable flag to obtain the raw index */
		ice_set_bit(buf->content.result_indx &
			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;

	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
	       ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;

		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped: word 0 is reserved for
			 * chaining per the loop's i + 1 indexing
			 */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;

		/* populate rg_list with the data from the child entry of this
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a private copy of the raw FW recipe buffers */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		/* reset the map entries for this profile before querying FW */
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* mirror the association into the reverse map */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 * @recp_list: pointer to sw recipe list
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
	struct ice_sw_recipe *recps;

	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		/* per-recipe rule lists plus the lock protecting them */
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* continuation token from the previous call (0 on first call) */
	cmd->element = CPU_TO_LE16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* hand the f/w continuation token and element count back to caller */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;

	/* return the allocated switch ID to the caller */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;

		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;

		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		/* return the allocated VEB counter ID to the caller */
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);

	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
		/* remember the last error but keep releasing resources */
		ret_status = status;

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi - add a VSI context to hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	if (!vsi_ctx->alloc_from_pool)
		/* caller supplies an explicit VSI number */
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);

	/* the command carries a read buffer (the VSI info section) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* record the VSI number assigned by f/w and pool usage counters */
	vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
		/* report current VSI pool usage back to the caller */
		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* the command carries a read buffer (the VSI info section) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* report current VSI pool usage back to the caller */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * check whether the VSI is valid or not
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
	/* Valid when the handle is in range and a context was saved for it */
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
	/* No bounds/NULL check here by design — caller must validate first */
	return hw->vsi_ctx[vsi_handle]->vsi_num;
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * return the VSI context entry for a given VSI handle
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	/* Returns NULL for out-of-range handles; entry may also be NULL */
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 * save the VSI context entry for a given VSI handle
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
	/* Ownership of @vsi transfers to the hw table; freed on clear */
	hw->vsi_ctx[vsi_handle] = vsi;
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* NOTE(review): a NULL check on vsi is presumably elided in this
	 * excerpt — confirm against full source before relying on it.
	 */
	/* Free the per-TC LAN queue context arrays and drop the pointers */
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			ice_free(hw, vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * clear the VSI context entry
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	/* Release queue contexts first, then the context entry itself;
	 * NOTE(review): NULL check / ice_free of vsi presumably elided here.
	 */
	ice_clear_vsi_q_ctx(hw, vsi_handle);
	hw->vsi_ctx[vsi_handle] = NULL;
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
	/* Walk every possible handle; clearing an empty slot is harmless */
	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;
	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	/* NOTE(review): an early return on AQ failure is presumably elided
	 * in this excerpt — confirm.
	 */
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	/* Create a new VSI context */
	tmp_vsi_ctx = (struct ice_vsi_ctx *)
		ice_malloc(hw, sizeof(*tmp_vsi_ctx));
	/* On allocation failure undo the HW add to avoid leaking the VSI */
	ice_aq_free_vsi(hw, vsi_ctx, false, cd);
	return ICE_ERR_NO_MEMORY;
	/* Copy caller's context and register it under the handle */
	*tmp_vsi_ctx = *vsi_ctx;
	ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	/* update with new HW VSI num */
	tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 * Free VSI context info from hardware as well as from VSI handle list
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
	enum ice_status status;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* Translate driver handle to the HW VSI number before the AQ call */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	/* NOTE(review): local context clear is presumably done only on
	 * AQ success (guard elided in this excerpt) — confirm.
	 */
	ice_clear_vsi_ctx(hw, vsi_handle);
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 * Update VSI context in the hardware
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* Translate driver handle to the HW VSI number, then issue update */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
 * ice_aq_get_vsi_params
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 * Get VSI context info from hardware (0x0212)
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		      struct ice_sq_cd *cd)
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aqc_get_vsi_resp *resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.get_vsi_resp;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* The VSI info struct is read back into the caller's context */
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* NOTE(review): result copy-back is presumably guarded by !status
	 * in the full source (guard elided in this excerpt) — confirm.
	 */
	vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * Add/Update Mirror Rule (0x260).
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	/* Validate argument combinations per rule type before touching HW */
	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;
		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
		return ICE_ERR_NO_MEMORY;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		ice_debug(hw, ICE_DBG_SW,
			  "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	for (i = 0; i < count; i++) {
		id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
		/* Validate specified VSI number, make sure it is less
		 * than ICE_MAX_VSI, if not return with error.
		if (id >= ICE_MAX_VSI) {
			ice_debug(hw, ICE_DBG_SW,
				  "Error VSI index (%u) out-of-range\n",
			/* Free the staging list before bailing out */
			ice_free(hw, mr_list);
			return ICE_ERR_OUT_OF_RANGE;
		/* add VSI to mirror rule */
			CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
		else /* remove VSI from mirror rule */
			mr_list[i] = CPU_TO_LE16(id);
	cmd = &desc.params.add_update_rule;
	/* A pre-existing rule ID means "update"; mark it valid for firmware */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);
	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	/* Return the rule ID firmware assigned/confirmed */
	*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
	ice_free(hw, mr_list);
 * ice_aq_delete_mir_rule - delete a mirror rule
 * @hw: pointer to the HW struct
 * @rule_id: Mirror rule ID (to be deleted)
 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
 *		 otherwise it is returned to the shared pool
 * @cd: pointer to command details structure or NULL
 * Delete Mirror Rule (0x261).
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
		       struct ice_sq_cd *cd)
	struct ice_aqc_delete_mir_rule *cmd;
	struct ice_aq_desc desc;
	/* rule_id should be in the range 0...63 */
	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
	cmd = &desc.params.del_rule;
	/* Firmware requires the "valid" bit set on the rule ID */
	rule_id |= ICE_AQC_RULE_ID_VALID_M;
	cmd->rule_id = CPU_TO_LE16(rule_id);
	/* NOTE(review): flag set presumably guarded by keep_allocd (guard
	 * elided in this excerpt) — confirm.
	 */
	cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 * allocates or free a VSI list resource
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	return ICE_ERR_NO_MEMORY;
	/* A single resource element is allocated/freed per call */
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* Lookup type selects which HW resource pool the list comes from:
	 * replication lists for MAC/ethertype/promisc filters, prune lists
	 * for VLAN filters.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST) {
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	/* For a free, the caller supplies the list ID to release */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
		goto ice_aq_alloc_free_vsi_list_exit;
	/* For an alloc, return the list ID firmware handed back */
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
ice_aq_alloc_free_vsi_list_exit:
	ice_free(hw, sw_buf);
 * ice_aq_set_storm_ctrl - Sets storm control configuration
 * @hw: pointer to the HW struct
 * @bcast_thresh: represents the upper threshold for broadcast storm control
 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control control knobs
 * Sets the storm control configuration (0x0280)
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
	struct ice_aqc_storm_cfg *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.storm_conf;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
	/* Thresholds are masked to the field width firmware accepts */
	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 * ice_aq_get_storm_ctrl - gets storm control configuration
 * @hw: pointer to the HW struct
 * @bcast_thresh: represents the upper threshold for broadcast storm control
 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control control knobs
 * Gets the storm control configuration (0x0281)
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
	enum ice_status status;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	/* On success, unpack the response fields; each output pointer is
	 * filled only when the caller provided it (NULL checks elided in
	 * this excerpt).
	 */
	struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
	*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
	*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
	*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
static enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Only the three switch-rule opcodes are legal here */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;
	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	/* RD flag: rule_list is a write buffer sent to firmware */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		CPU_TO_LE16(num_rules);
	return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
	/* RD flag: the recipe list is a write buffer sent to firmware */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	buf_size = num_recipes * sizeof(*s_recipe_list);
	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	/* Enforce the "buffer sized for all recipes" contract up front */
	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
	cmd->return_index = CPU_TO_LE16(recipe_root);
	cmd->num_sub_recipes = 0;
	buf_size = *num_recipes * sizeof(*s_recipe_list);
	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* Report how many entries firmware actually returned */
	/* cppcheck-suppress constArgument */
	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = CPU_TO_LE16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
		   ICE_NONDMA_TO_NONDMA);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = CPU_TO_LE16(profile_id);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* Copy the association bitmap out of the descriptor for the caller;
	 * NOTE(review): presumably done only on !status (guard elided here).
	 */
	ice_memcpy(r_bitmap, cmd->recipe_assoc,
		   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	return ICE_ERR_NO_MEMORY;
	/* Request one shared recipe resource from firmware */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	/* On success the allocated recipe ID is in the first element */
	*rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
	ice_free(hw, sw_buf);
/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		/* Only the low bits of the port number identify the lport */
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->pf_vf_num = pf_vf_num;
		/* No default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		ice_debug(pi->hw, ICE_DBG_SW,
			  "incorrect VSI/port type received\n");
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	/* This driver instance manages a single physical port */
	num_total_ports = 1;
	rbuf = (struct ice_aqc_get_sw_cfg_resp *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
	return ICE_ERR_NO_MEMORY;
	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
				   &req_desc, &num_elems, NULL);
	for (i = 0; i < num_elems; i++) {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;
		u16 pf_vf_num, swid, vsi_port_num;
		ele = rbuf[i].elements;
		/* Decode the packed response element: port/VSI number,
		 * owning function, switch ID and element type.
		 */
		vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
			ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
		pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
			ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
		swid = LE16_TO_CPU(ele->swid);
		if (LE16_TO_CPU(ele->pf_vf_num) &
		    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
		res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
		case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
			if (j == num_total_ports) {
				ice_debug(hw, ICE_DBG_SW,
					  "more ports than expected\n");
				status = ICE_ERR_CFG;
			ice_init_port_info(hw->port_info,
					   vsi_port_num, res_type, swid,
	} while (req_desc && !status);
	ice_free(hw, (void *)rbuf);
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
	/* Loopback/LAN enables only matter for Tx-side forwarding actions */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 * The switch is a VEPA.
		 * In all other cases, the LAN enable has to be set to false.
		if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
		    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
		    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
		    fi->lkup_type == ICE_SW_LKUP_DFLT ||
		    fi->lkup_type == ICE_SW_LKUP_VLAN ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
		    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
		     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
	/* Sentinel: any value above ICE_MAX_VLAN_ID means "no VLAN match" */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	/* A remove needs only the rule index; no header or action payload */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);
	/* Build the 32-bit action word from the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
	case ICE_FWD_TO_QGRP:
		/* Queue-group size is encoded as log2 of the region size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* Pick match fields (DA / VLAN / ethertype) based on lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
	case ICE_SW_LKUP_ETHERTYPE:
		/* Patch the ethertype into the dummy header (big endian) */
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* Patch the destination MAC into the dummy header when one applies;
	 * NOTE(review): presumably guarded by a non-NULL daddr check (guard
	 * elided in this excerpt).
	 */
	ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
		   ICE_NONDMA_TO_NONDMA);
	/* Patch the VLAN TCI only when a real VLAN ID was selected above */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	const u16 num_lg_acts = 3;
	enum ice_status status;
	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	return ICE_ERR_NO_MEMORY;
	/* Second rule (the lookup update) lives right after the large action
	 * in the same buffer.
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
	/* First action VSI forwarding or VSI list forwarding depending on how
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;
	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);
	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));
	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	/* NOTE(review): bookkeeping below presumably done only on !status
	 * (guard elided in this excerpt) — confirm.
	 */
	m_ent->lg_act_idx = l_id;
	m_ent->sw_marker_id = sw_marker;
	ice_free(hw, lg_act);
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	/* Counters are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;
	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
	return ICE_ERR_NO_MEMORY;
	/* Second rule (the lookup update) follows the large action in the
	 * same buffer.
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);
	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
	/* First action VSI forwarding or VSI list forwarding depending on how
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;
	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);
	/* Point the lookup rule's action at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	/* NOTE(review): bookkeeping below presumably done only on !status
	 * (guard elided in this excerpt) — confirm.
	 */
	m_ent->lg_act_idx = l_id;
	m_ent->counter_index = counter_id;
	ice_free(hw, lg_act);
2338 * ice_create_vsi_list_map
2339 * @hw: pointer to the hardware structure
2340 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2341 * @num_vsi: number of VSI handles in the array
2342 * @vsi_list_id: VSI list ID generated as part of allocate resource
2344 * Helper function to create a new entry of VSI list ID to VSI mapping
2345 * using the given VSI list ID
/* NOTE(review): listing elides the vsi_list_id parameter line, the NULL
 * check after ice_calloc, and the trailing return — not visible here.
 */
2347 static struct ice_vsi_list_map_info *
2348 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2351 struct ice_switch_info *sw = hw->switch_info;
2352 struct ice_vsi_list_map_info *v_map;
/* Zero-initialized map entry; vsi_map bits track member VSI handles. */
2355 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2360 v_map->vsi_list_id = vsi_list_id;
/* Mark each supplied VSI handle as a member of this list. */
2362 for (i = 0; i < num_vsi; i++)
2363 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the mapping on the switch-wide list for later lookup/teardown. */
2365 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2370 * ice_update_vsi_list_rule
2371 * @hw: pointer to the hardware structure
2372 * @vsi_handle_arr: array of VSI handles to form a VSI list
2373 * @num_vsi: number of VSI handles in the array
2374 * @vsi_list_id: VSI list ID generated as part of allocate resource
2375 * @remove: Boolean value to indicate if this is a remove action
2376 * @opc: switch rules population command type - pass in the command opcode
2377 * @lkup_type: lookup type of the filter
2379 * Call AQ command to add a new switch rule or update existing switch rule
2380 * using the given VSI list ID
/* NOTE(review): listing elides local declarations (rule_type, s_rule_size,
 * i), the num_vsi validation preceding the first return, the !s_rule check,
 * a goto label for the loop's error path, and the final return.
 */
2382 static enum ice_status
2383 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2384 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2385 enum ice_sw_lkup_type lkup_type)
2387 struct ice_aqc_sw_rules_elem *s_rule;
2388 enum ice_status status;
2394 return ICE_ERR_PARAM;
/* Pick the AQ rule type: plain VSI list set/clear for MAC/ethertype/promisc
 * style lookups, prune list set/clear for VLAN; anything else is invalid.
 */
2396 if (lkup_type == ICE_SW_LKUP_MAC ||
2397 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2398 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2399 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2400 lkup_type == ICE_SW_LKUP_PROMISC ||
2401 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2402 lkup_type == ICE_SW_LKUP_LAST)
2403 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2404 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2405 else if (lkup_type == ICE_SW_LKUP_VLAN)
2406 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2407 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2409 return ICE_ERR_PARAM;
/* Rule size is variable: header plus one u16 per VSI. */
2411 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2412 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2414 return ICE_ERR_NO_MEMORY;
/* Translate each software VSI handle to its HW VSI number; bail on any
 * invalid handle.
 */
2415 for (i = 0; i < num_vsi; i++) {
2416 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2417 status = ICE_ERR_PARAM;
2420 /* AQ call requires hw_vsi_id(s) */
2421 s_rule->pdata.vsi_list.vsi[i] =
2422 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2425 s_rule->type = CPU_TO_LE16(rule_type);
2426 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2427 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
/* Single rule, opcode chosen by the caller (add vs update). */
2429 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2432 ice_free(hw, s_rule);
2437 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2438 * @hw: pointer to the HW struct
2439 * @vsi_handle_arr: array of VSI handles to form a VSI list
2440 * @num_vsi: number of VSI handles in the array
2441 * @vsi_list_id: stores the ID of the VSI list to be created
2442 * @lkup_type: switch rule filter's lookup type
/* NOTE(review): listing elides the opening brace context and the status
 * check between the alloc call and the update call.
 */
2444 static enum ice_status
2445 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2446 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2448 enum ice_status status;
/* First allocate the VSI list resource in FW; ID comes back via
 * *vsi_list_id.
 */
2450 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2451 ice_aqc_opc_alloc_res);
2455 /* Update the newly created VSI list to include the specified VSIs */
2456 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2457 *vsi_list_id, false,
2458 ice_aqc_opc_add_sw_rules, lkup_type);
2462 * ice_create_pkt_fwd_rule
2463 * @hw: pointer to the hardware structure
2464 * @recp_list: corresponding filter management list
2465 * @f_entry: entry containing packet forwarding information
2467 * Create switch rule with given filter information and add an entry
2468 * to the corresponding filter management list to track this switch rule
/* NOTE(review): listing elides the !s_rule / !fm_entry checks, the status
 * check after ice_aq_sw_rules, and the trailing return.
 */
2471 static enum ice_status
2472 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2473 struct ice_fltr_list_entry *f_entry)
2475 struct ice_fltr_mgmt_list_entry *fm_entry;
2476 struct ice_aqc_sw_rules_elem *s_rule;
2477 enum ice_status status;
/* Rule buffer sized for a lookup rule carrying the dummy Ethernet header. */
2479 s_rule = (struct ice_aqc_sw_rules_elem *)
2480 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2482 return ICE_ERR_NO_MEMORY;
/* Book-keeping entry that will track this rule on the recipe list. */
2483 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2484 ice_malloc(hw, sizeof(*fm_entry));
2486 status = ICE_ERR_NO_MEMORY;
2487 goto ice_create_pkt_fwd_rule_exit;
2490 fm_entry->fltr_info = f_entry->fltr_info;
2492 /* Initialize all the fields for the management entry */
2493 fm_entry->vsi_count = 1;
2494 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2495 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2496 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2498 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2499 ice_aqc_opc_add_sw_rules);
2501 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2502 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is dropped (rule was not installed). */
2504 ice_free(hw, fm_entry);
2505 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the assigned rule index; propagate it to both the caller's
 * entry and the book-keeping copy.
 */
2508 f_entry->fltr_info.fltr_rule_id =
2509 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2510 fm_entry->fltr_info.fltr_rule_id =
2511 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2513 /* The book keeping entries will get removed when base driver
2514 * calls remove filter AQ command
2516 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2518 ice_create_pkt_fwd_rule_exit:
2519 ice_free(hw, s_rule);
2524 * ice_update_pkt_fwd_rule
2525 * @hw: pointer to the hardware structure
2526 * @f_info: filter information for switch rule
2528 * Call AQ command to update a previously created switch rule with a
/* NOTE(review): listing elides the !s_rule check and the trailing return. */
2531 static enum ice_status
2532 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2534 struct ice_aqc_sw_rules_elem *s_rule;
2535 enum ice_status status;
2537 s_rule = (struct ice_aqc_sw_rules_elem *)
2538 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2540 return ICE_ERR_NO_MEMORY;
2542 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned rule ID so this is an
 * in-place update rather than a new rule.
 */
2544 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2546 /* Update switch rule with new rule set to forward VSI list */
2547 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2548 ice_aqc_opc_update_sw_rules, NULL);
2550 ice_free(hw, s_rule);
2555 * ice_update_sw_rule_bridge_mode
2556 * @hw: pointer to the HW struct
2558 * Updates unicast switch filter rules based on VEB/VEPA mode
/* NOTE(review): listing elides the loop's list_entry member argument, the
 * error break inside the loop, the closing braces, and the return.
 */
2560 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2562 struct ice_switch_info *sw = hw->switch_info;
2563 struct ice_fltr_mgmt_list_entry *fm_entry;
2564 enum ice_status status = ICE_SUCCESS;
2565 struct LIST_HEAD_TYPE *rule_head;
2566 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only MAC-lookup rules are walked; bridge mode affects unicast MACs. */
2568 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2569 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2571 ice_acquire_lock(rule_lock);
2572 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2574 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2575 u8 *addr = fi->l_data.mac.mac_addr;
2577 /* Update unicast Tx rules to reflect the selected
/* Re-push only Tx-direction unicast rules whose action is a forward
 * (VSI, VSI list, queue or queue group).
 */
2580 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2581 (fi->fltr_act == ICE_FWD_TO_VSI ||
2582 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2583 fi->fltr_act == ICE_FWD_TO_Q ||
2584 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2585 status = ice_update_pkt_fwd_rule(hw, fi);
2591 ice_release_lock(rule_lock);
2597 * ice_add_update_vsi_list
2598 * @hw: pointer to the hardware structure
2599 * @m_entry: pointer to current filter management list entry
2600 * @cur_fltr: filter information from the book keeping entry
2601 * @new_fltr: filter information with the new VSI to be added
2603 * Call AQ command to add or update previously created VSI list with new VSI.
2605 * Helper function to do book keeping associated with adding filter information
2606 * The algorithm to do the book keeping is described below :
2607 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2608 * if only one VSI has been added till now
2609 * Allocate a new VSI list and add two VSIs
2610 * to this list using switch rule command
2611 * Update the previously created switch rule with the
2612 * newly created VSI list ID
2613 * if a VSI list was previously created
2614 * Add the new VSI to the previously created VSI list set
2615 * using the update switch rule command
/* NOTE(review): listing elides several status checks/returns, the vsi_list_id
 * argument lines of some calls, and closing braces; internal numbering is
 * non-contiguous.
 */
2617 static enum ice_status
2618 ice_add_update_vsi_list(struct ice_hw *hw,
2619 struct ice_fltr_mgmt_list_entry *m_entry,
2620 struct ice_fltr_info *cur_fltr,
2621 struct ice_fltr_info *new_fltr)
2623 enum ice_status status = ICE_SUCCESS;
2624 u16 vsi_list_id = 0;
/* Queue/queue-group actions cannot be merged into a VSI list. */
2626 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2627 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2628 return ICE_ERR_NOT_IMPL;
2630 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2631 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2632 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2633 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2634 return ICE_ERR_NOT_IMPL;
/* Case 1: single VSI today and no list yet — build a two-entry list. */
2636 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2637 /* Only one entry existed in the mapping and it was not already
2638 * a part of a VSI list. So, create a VSI list with the old and
2641 struct ice_fltr_info tmp_fltr;
2642 u16 vsi_handle_arr[2];
2644 /* A rule already exists with the new VSI being added */
2645 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2646 return ICE_ERR_ALREADY_EXISTS;
2648 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2649 vsi_handle_arr[1] = new_fltr->vsi_handle;
2650 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2652 new_fltr->lkup_type);
/* Repoint the existing single-VSI rule at the new list (rule ID kept). */
2656 tmp_fltr = *new_fltr;
2657 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2658 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2659 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2660 /* Update the previous switch rule of "MAC forward to VSI" to
2661 * "MAC fwd to VSI list"
2663 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with the HW rule. */
2667 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2668 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2669 m_entry->vsi_list_info =
2670 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2673 /* If this entry was large action then the large action needs
2674 * to be updated to point to FWD to VSI list
2676 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2678 ice_add_marker_act(hw, m_entry,
2679 m_entry->sw_marker_id,
2680 m_entry->lg_act_idx);
/* Case 2: a VSI list already exists — just append the new VSI to it. */
2682 u16 vsi_handle = new_fltr->vsi_handle;
2683 enum ice_adminq_opc opcode;
2685 if (!m_entry->vsi_list_info)
2688 /* A rule already exists with the new VSI being added */
2689 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2692 /* Update the previously created VSI list set with
2693 * the new VSI ID passed in
2695 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2696 opcode = ice_aqc_opc_update_sw_rules;
2698 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2699 vsi_list_id, false, opcode,
2700 new_fltr->lkup_type);
2701 /* update VSI list mapping info with new VSI ID */
2703 ice_set_bit(vsi_handle,
2704 m_entry->vsi_list_info->vsi_map);
/* NOTE(review): a status check presumably guards this increment in the
 * full source — not visible here.
 */
2707 m_entry->vsi_count++;
2712 * ice_find_rule_entry - Search a rule entry
2713 * @list_head: head of rule list
2714 * @f_info: rule information
2716 * Helper function to search for a given rule entry
2717 * Returns pointer to entry storing the rule if found
/* NOTE(review): listing elides the list_entry member argument of the loop
 * macro, the assignment to ret, the break, and the trailing return.
 */
2719 static struct ice_fltr_mgmt_list_entry *
2720 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2721 struct ice_fltr_info *f_info)
2723 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
/* Linear scan: a rule matches when its lookup data (l_data) and its
 * direction/flag field both match the query.
 */
2725 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2727 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2728 sizeof(f_info->l_data)) &&
2729 f_info->flag == list_itr->fltr_info.flag) {
2738 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2739 * @recp_list: VSI lists needs to be searched
2740 * @vsi_handle: VSI handle to be found in VSI list
2741 * @vsi_list_id: VSI list ID found containing vsi_handle
2743 * Helper function to search a VSI list with single entry containing given VSI
2744 * handle element. This can be extended further to search VSI list with more
2745 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* NOTE(review): listing elides the vsi_list_id out-parameter line, the
 * vsi_handle bit-test arguments, returns/breaks and closing braces.
 */
2747 static struct ice_vsi_list_map_info *
2748 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2751 struct ice_vsi_list_map_info *map_info = NULL;
2752 struct LIST_HEAD_TYPE *list_head;
2754 list_head = &recp_list->filt_rules;
/* Advanced recipes keep entries of a different type, so the walk is
 * duplicated for the two entry layouts.
 */
2755 if (recp_list->adv_rule) {
2756 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2758 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2759 ice_adv_fltr_mgmt_list_entry,
2761 if (list_itr->vsi_list_info) {
2762 map_info = list_itr->vsi_list_info;
2763 if (ice_is_bit_set(map_info->vsi_map,
2765 *vsi_list_id = map_info->vsi_list_id;
2771 struct ice_fltr_mgmt_list_entry *list_itr;
2773 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2774 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires vsi_count == 1, matching the
 * "single entry" contract in the function's header comment.
 */
2776 if (list_itr->vsi_count == 1 &&
2777 list_itr->vsi_list_info) {
2778 map_info = list_itr->vsi_list_info;
2779 if (ice_is_bit_set(map_info->vsi_map,
2781 *vsi_list_id = map_info->vsi_list_id;
2791 * ice_add_rule_internal - add rule for a given lookup type
2792 * @hw: pointer to the hardware structure
2793 * @recp_list: recipe list for which rule has to be added
2794 * @lport: logic port number on which function add rule
2795 * @f_entry: structure containing MAC forwarding information
2797 * Adds or updates the rule lists for a given recipe
/* NOTE(review): listing elides the src assignment target for the Tx branch,
 * the !m_entry test before the create path, and the trailing return.
 */
2799 static enum ice_status
2800 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2801 u8 lport, struct ice_fltr_list_entry *f_entry)
2803 struct ice_fltr_info *new_fltr, *cur_fltr;
2804 struct ice_fltr_mgmt_list_entry *m_entry;
2805 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2806 enum ice_status status = ICE_SUCCESS;
2808 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2809 return ICE_ERR_PARAM;
2811 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2812 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2813 f_entry->fltr_info.fwd_id.hw_vsi_id =
2814 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2816 rule_lock = &recp_list->filt_rule_lock;
2818 ice_acquire_lock(rule_lock);
2819 new_fltr = &f_entry->fltr_info;
/* Rule source: logical port for Rx rules, HW VSI number for Tx rules. */
2820 if (new_fltr->flag & ICE_FLTR_RX)
2821 new_fltr->src = lport;
2822 else if (new_fltr->flag & ICE_FLTR_TX)
2824 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No existing rule -> create a fresh forwarding rule; existing rule ->
 * fold this VSI into it (possibly converting to a VSI list).
 */
2826 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2828 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2829 goto exit_add_rule_internal;
2832 cur_fltr = &m_entry->fltr_info;
2833 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2835 exit_add_rule_internal:
2836 ice_release_lock(rule_lock);
2841 * ice_remove_vsi_list_rule
2842 * @hw: pointer to the hardware structure
2843 * @vsi_list_id: VSI list ID generated as part of allocate resource
2844 * @lkup_type: switch rule filter lookup type
2846 * The VSI list should be emptied before this function is called to remove the
/* NOTE(review): listing elides the s_rule_size declaration, the !s_rule
 * check, and the trailing return. The built s_rule is not visibly submitted
 * via ice_aq_sw_rules here — whether a submission exists among the elided
 * lines should be confirmed against the full source.
 */
2849 static enum ice_status
2850 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2851 enum ice_sw_lkup_type lkup_type)
2853 struct ice_aqc_sw_rules_elem *s_rule;
2854 enum ice_status status;
/* Zero-VSI rule: header only, no VSI entries. */
2857 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2858 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2860 return ICE_ERR_NO_MEMORY;
2862 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2863 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2865 /* Free the vsi_list resource that we allocated. It is assumed that the
2866 * list is empty at this point.
2868 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2869 ice_aqc_opc_free_res);
2871 ice_free(hw, s_rule);
2876 * ice_rem_update_vsi_list
2877 * @hw: pointer to the hardware structure
2878 * @vsi_handle: VSI handle of the VSI to remove
2879 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): listing elides the vsi_list_id/rem_vsi_handle declarations,
 * some status checks/returns, argument lines of two calls, closing braces
 * and the final return.
 */
2882 static enum ice_status
2883 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2884 struct ice_fltr_mgmt_list_entry *fm_list)
2886 enum ice_sw_lkup_type lkup_type;
2887 enum ice_status status = ICE_SUCCESS;
/* Only meaningful for rules currently forwarding to a non-empty VSI list. */
2890 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2891 fm_list->vsi_count == 0)
2892 return ICE_ERR_PARAM;
2894 /* A rule with the VSI being removed does not exist */
2895 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2896 return ICE_ERR_DOES_NOT_EXIST;
2898 lkup_type = fm_list->fltr_info.lkup_type;
2899 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Pull this VSI out of the HW VSI list (remove = true). */
2900 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2901 ice_aqc_opc_update_sw_rules,
2906 fm_list->vsi_count--;
2907 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Down to one VSI (non-VLAN): collapse the rule back to a plain
 * forward-to-VSI and drain the list so it can be removed below.
 */
2909 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2910 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2911 struct ice_vsi_list_map_info *vsi_list_info =
2912 fm_list->vsi_list_info;
/* Identify the sole remaining VSI from the bitmap. */
2915 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2917 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2918 return ICE_ERR_OUT_OF_RANGE;
2920 /* Make sure VSI list is empty before removing it below */
2921 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2923 ice_aqc_opc_update_sw_rules,
2928 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2929 tmp_fltr_info.fwd_id.hw_vsi_id =
2930 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2931 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2932 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2934 ice_debug(hw, ICE_DBG_SW,
2935 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2936 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2940 fm_list->fltr_info = tmp_fltr_info;
/* The list is now unused in either case below: either it was drained to
 * one VSI (non-VLAN) or it hit zero VSIs (VLAN).
 */
2943 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2944 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2945 struct ice_vsi_list_map_info *vsi_list_info =
2946 fm_list->vsi_list_info;
2948 /* Remove the VSI list since it is no longer used */
2949 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2951 ice_debug(hw, ICE_DBG_SW,
2952 "Failed to remove VSI list %d, error %d\n",
2953 vsi_list_id, status);
/* Drop the software mapping now that the HW resource is gone. */
2957 LIST_DEL(&vsi_list_info->list_entry);
2958 ice_free(hw, vsi_list_info);
2959 fm_list->vsi_list_info = NULL;
2966 * ice_remove_rule_internal - Remove a filter rule of a given type
2968 * @hw: pointer to the hardware structure
2969 * @recp_list: recipe list for which the rule needs to removed
2970 * @f_entry: rule entry containing filter information
/* NOTE(review): listing elides the vsi_handle declaration, some goto/exit
 * labels, remove_rule assignments, closing braces, and the final return.
 */
2972 static enum ice_status
2973 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2974 struct ice_fltr_list_entry *f_entry)
2976 struct ice_fltr_mgmt_list_entry *list_elem;
2977 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2978 enum ice_status status = ICE_SUCCESS;
2979 bool remove_rule = false;
2982 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2983 return ICE_ERR_PARAM;
2984 f_entry->fltr_info.fwd_id.hw_vsi_id =
2985 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2987 rule_lock = &recp_list->filt_rule_lock;
2988 ice_acquire_lock(rule_lock);
2989 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2990 &f_entry->fltr_info);
2992 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide how to tear down based on how the rule forwards. */
2996 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2998 } else if (!list_elem->vsi_list_info) {
2999 status = ICE_ERR_DOES_NOT_EXIST;
3001 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3002 /* a ref_cnt > 1 indicates that the vsi_list is being
3003 * shared by multiple rules. Decrement the ref_cnt and
3004 * remove this rule, but do not modify the list, as it
3005 * is in-use by other rules.
3007 list_elem->vsi_list_info->ref_cnt--;
3010 /* a ref_cnt of 1 indicates the vsi_list is only used
3011 * by one rule. However, the original removal request is only
3012 * for a single VSI. Update the vsi_list first, and only
3013 * remove the rule if there are no further VSIs in this list.
3015 vsi_handle = f_entry->fltr_info.vsi_handle;
3016 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3019 /* if VSI count goes to zero after updating the VSI list */
3020 if (list_elem->vsi_count == 0)
3025 /* Remove the lookup rule */
3026 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs no dummy header, hence the NO_HDR rule size. */
3028 s_rule = (struct ice_aqc_sw_rules_elem *)
3029 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3031 status = ICE_ERR_NO_MEMORY;
3035 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3036 ice_aqc_opc_remove_sw_rules);
3038 status = ice_aq_sw_rules(hw, s_rule,
3039 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3040 ice_aqc_opc_remove_sw_rules, NULL);
3042 /* Remove a book keeping from the list */
3043 ice_free(hw, s_rule);
/* Unlink and free the tracking entry once the HW rule is gone. */
3048 LIST_DEL(&list_elem->list_entry);
3049 ice_free(hw, list_elem);
3052 ice_release_lock(rule_lock);
3057 * ice_aq_get_res_alloc - get allocated resources
3058 * @hw: pointer to the HW struct
3059 * @num_entries: pointer to u16 to store the number of resource entries returned
3060 * @buf: pointer to user-supplied buffer
3061 * @buf_size: size of buff
3062 * @cd: pointer to command details structure or NULL
3064 * The user-supplied buffer must be large enough to store the resource
3065 * information for all resource types. Each resource type is an
3066 * ice_aqc_get_res_resp_data_elem structure.
/* NOTE(review): listing elides the return-type line, the !buf test before
 * the ICE_ERR_BAD_PTR return, and the trailing return of status.
 */
3069 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3070 u16 buf_size, struct ice_sq_cd *cd)
3072 struct ice_aqc_get_res_alloc *resp;
3073 enum ice_status status;
3074 struct ice_aq_desc desc;
3077 return ICE_ERR_BAD_PTR;
/* Caller's buffer must hold the full per-type response array. */
3079 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3080 return ICE_ERR_INVAL_SIZE;
3082 resp = &desc.params.get_res;
3084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3085 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only written back on a successful command. */
3087 if (!status && num_entries)
3088 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3094 * ice_aq_get_res_descs - get allocated resource descriptors
3095 * @hw: pointer to the hardware structure
3096 * @num_entries: number of resource entries in buffer
3097 * @buf: Indirect buffer to hold data parameters and response
3098 * @buf_size: size of buffer for indirect commands
3099 * @res_type: resource type
3100 * @res_shared: is resource shared
3101 * @desc_id: input - first desc ID to start; output - next desc ID
3102 * @cd: pointer to command details structure or NULL
/* NOTE(review): listing elides the return-type line, the !buf / !desc_id
 * test before the first ICE_ERR_PARAM, a status check before the next_desc
 * write-back, and the trailing return.
 */
3105 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3106 struct ice_aqc_get_allocd_res_desc_resp *buf,
3107 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3108 struct ice_sq_cd *cd)
3110 struct ice_aqc_get_allocd_res_desc *cmd;
3111 struct ice_aq_desc desc;
3112 enum ice_status status;
3114 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3116 cmd = &desc.params.get_res_desc;
3119 return ICE_ERR_PARAM;
/* Buffer must be exactly num_entries response elements. */
3121 if (buf_size != (num_entries * sizeof(*buf)))
3122 return ICE_ERR_PARAM;
3124 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared flag into one field. */
3126 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3127 ICE_AQC_RES_TYPE_M) | (res_shared ?
3128 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3129 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3131 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: FW returns the next descriptor ID for continuation. */
3133 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3139 * ice_add_mac_rule - Add a MAC address based filter rule
3140 * @hw: pointer to the hardware structure
3141 * @m_list: list of MAC addresses and forwarding information
3142 * @sw: pointer to switch info struct for which function add rule
3143 * @lport: logic port number on which function add rule
3145 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3146 * multiple unicast addresses, the function assumes that all the
3147 * addresses are unique in a given add_mac call. It doesn't
3148 * check for duplicates in this case, removing duplicates from a given
3149 * list should be taken care of in the caller of this function.
/* NOTE(review): listing elides several lines: local declarations
 * (vsi_handle, hw_vsi_id, elem_sent), the num_unicast counting/increment,
 * loop list_entry arguments, some braces and the final return. Comments
 * below describe only the visible code.
 */
3151 static enum ice_status
3152 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3153 struct ice_switch_info *sw, u8 lport)
3155 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3156 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3157 struct ice_fltr_list_entry *m_list_itr;
3158 struct LIST_HEAD_TYPE *rule_head;
3159 u16 total_elem_left, s_rule_size;
3160 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3161 enum ice_status status = ICE_SUCCESS;
3162 u16 num_unicast = 0;
3166 rule_lock = &recp_list->filt_rule_lock;
3167 rule_head = &recp_list->filt_rules;
/* Pass 1: validate every entry and route non-bulk cases immediately. */
3169 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3171 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3175 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3176 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3177 if (!ice_is_vsi_valid(hw, vsi_handle))
3178 return ICE_ERR_PARAM;
3179 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3180 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3181 /* update the src in case it is VSI num */
3182 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3183 return ICE_ERR_PARAM;
3184 m_list_itr->fltr_info.src = hw_vsi_id;
3185 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3186 IS_ZERO_ETHER_ADDR(add))
3187 return ICE_ERR_PARAM;
/* Exclusive unicast: reject duplicates up front (bulk path follows). */
3188 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3189 /* Don't overwrite the unicast address */
3190 ice_acquire_lock(rule_lock);
3191 if (ice_find_rule_entry(rule_head,
3192 &m_list_itr->fltr_info)) {
3193 ice_release_lock(rule_lock);
3194 return ICE_ERR_ALREADY_EXISTS;
3196 ice_release_lock(rule_lock);
/* Multicast (or shared unicast): add one-by-one via the generic path. */
3198 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3199 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3200 m_list_itr->status =
3201 ice_add_rule_internal(hw, recp_list, lport,
3203 if (m_list_itr->status)
3204 return m_list_itr->status;
3208 ice_acquire_lock(rule_lock);
3209 /* Exit if no suitable entries were found for adding bulk switch rule */
3211 status = ICE_SUCCESS;
3212 goto ice_add_mac_exit;
3215 /* Allocate switch rule buffer for the bulk update for unicast */
3216 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3217 s_rule = (struct ice_aqc_sw_rules_elem *)
3218 ice_calloc(hw, num_unicast, s_rule_size);
3220 status = ICE_ERR_NO_MEMORY;
3221 goto ice_add_mac_exit;
/* Pass 2: serialize one rule per unicast address into the bulk buffer. */
3225 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3227 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3228 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3230 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3231 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3232 ice_aqc_opc_add_sw_rules);
3233 r_iter = (struct ice_aqc_sw_rules_elem *)
3234 ((u8 *)r_iter + s_rule_size);
3238 /* Call AQ bulk switch rule update for all unicast addresses */
3240 /* Call AQ switch rule in AQ_MAX chunk */
/* Submit in chunks bounded by the AQ's max buffer length. */
3241 for (total_elem_left = num_unicast; total_elem_left > 0;
3242 total_elem_left -= elem_sent) {
3243 struct ice_aqc_sw_rules_elem *entry = r_iter;
3245 elem_sent = MIN_T(u8, total_elem_left,
3246 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3247 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3248 elem_sent, ice_aqc_opc_add_sw_rules,
3251 goto ice_add_mac_exit;
3252 r_iter = (struct ice_aqc_sw_rules_elem *)
3253 ((u8 *)r_iter + (elem_sent * s_rule_size));
3256 /* Fill up rule ID based on the value returned from FW */
/* Pass 3: walk the buffer again in the same order to harvest FW-assigned
 * rule IDs and create one book-keeping entry per unicast rule.
 */
3258 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3260 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3261 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3262 struct ice_fltr_mgmt_list_entry *fm_entry;
3264 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3265 f_info->fltr_rule_id =
3266 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3267 f_info->fltr_act = ICE_FWD_TO_VSI;
3268 /* Create an entry to track this MAC address */
3269 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3270 ice_malloc(hw, sizeof(*fm_entry));
3272 status = ICE_ERR_NO_MEMORY;
3273 goto ice_add_mac_exit;
3275 fm_entry->fltr_info = *f_info;
3276 fm_entry->vsi_count = 1;
3277 /* The book keeping entries will get removed when
3278 * base driver calls remove filter AQ command
3281 LIST_ADD(&fm_entry->list_entry, rule_head);
3282 r_iter = (struct ice_aqc_sw_rules_elem *)
3283 ((u8 *)r_iter + s_rule_size);
3288 ice_release_lock(rule_lock);
3290 ice_free(hw, s_rule);
3295 * ice_add_mac - Add a MAC address based filter rule
3296 * @hw: pointer to the hardware structure
3297 * @m_list: list of MAC addresses and forwarding information
3299 * Function add MAC rule for logical port from HW struct
/* Thin public wrapper: validates input (the guard's condition line is
 * elided from this listing) then adds MAC rules for the HW's logical port.
 * NOTE(review): the return-type line also lies outside this listing.
 */
3302 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3305 return ICE_ERR_PARAM;
3307 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3308 hw->port_info->lport);
/* Adds one VLAN filter rule, managing the shared VSI list that VLAN pruning
 * rules forward to. Three paths are visible below: (1) no existing rule ->
 * create a fwd rule (reusing an existing VSI list when one already contains
 * the target VSI); (2) existing rule whose VSI list has ref_cnt == 1 ->
 * update that list in place; (3) existing rule whose list is shared ->
 * build a new two-VSI list and repoint the rule at it.
 * NOTE(review): excerpt is line-sampled; several conditions, braces and
 * error-path gotos are elided.
 */
3312 * ice_add_vlan_internal - Add one VLAN based filter rule
3313 * @hw: pointer to the hardware structure
3314 * @recp_list: recipe list for which rule has to be added
3315 * @f_entry: filter entry containing one VLAN information
3317 static enum ice_status
3318 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3319 struct ice_fltr_list_entry *f_entry)
3321 struct ice_fltr_mgmt_list_entry *v_list_itr;
3322 struct ice_fltr_info *new_fltr, *cur_fltr;
3323 enum ice_sw_lkup_type lkup_type;
3324 u16 vsi_list_id = 0, vsi_handle;
3325 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3326 enum ice_status status = ICE_SUCCESS;
3328 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3329 return ICE_ERR_PARAM;
/* Resolve software VSI handle to the hardware VSI number used in rules. */
3331 f_entry->fltr_info.fwd_id.hw_vsi_id =
3332 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3333 new_fltr = &f_entry->fltr_info;
3335 /* VLAN ID should only be 12 bits */
3336 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3337 return ICE_ERR_PARAM;
3339 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3340 return ICE_ERR_PARAM;
3342 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3343 lkup_type = new_fltr->lkup_type;
3344 vsi_handle = new_fltr->vsi_handle;
3345 rule_lock = &recp_list->filt_rule_lock;
/* Lock held for the remainder of the list manipulation below. */
3346 ice_acquire_lock(rule_lock);
3347 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
/* Path (1): no matching rule yet (the "if (!v_list_itr)" line is elided). */
3349 struct ice_vsi_list_map_info *map_info = NULL;
3351 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3352 /* All VLAN pruning rules use a VSI list. Check if
3353 * there is already a VSI list containing VSI that we
3354 * want to add. If found, use the same vsi_list_id for
3355 * this new VLAN rule or else create a new list.
3357 map_info = ice_find_vsi_list_entry(recp_list,
3361 status = ice_create_vsi_list_rule(hw,
3369 /* Convert the action to forwarding to a VSI list. */
3370 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3371 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3374 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3376 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3379 status = ICE_ERR_DOES_NOT_EXIST;
3382 /* reuse VSI list for new rule and increment ref_cnt */
3384 v_list_itr->vsi_list_info = map_info;
3385 map_info->ref_cnt++;
3387 v_list_itr->vsi_list_info =
3388 ice_create_vsi_list_map(hw, &vsi_handle,
3392 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3393 /* Update existing VSI list to add new VSI ID only if it used
3396 cur_fltr = &v_list_itr->fltr_info;
3397 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3400 /* If VLAN rule exists and VSI list being used by this rule is
3401 * referenced by more than 1 VLAN rule. Then create a new VSI
3402 * list appending previous VSI with new VSI and update existing
3403 * VLAN rule to point to new VSI list ID
3405 struct ice_fltr_info tmp_fltr;
3406 u16 vsi_handle_arr[2];
3409 /* Current implementation only supports reusing VSI list with
3410 * one VSI count. We should never hit below condition
3412 if (v_list_itr->vsi_count > 1 &&
3413 v_list_itr->vsi_list_info->ref_cnt > 1) {
3414 ice_debug(hw, ICE_DBG_SW,
3415 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3416 status = ICE_ERR_CFG;
/* Find the single VSI currently in the shared list (assignment target
 * "cur_handle =" is elided in this excerpt).
 */
3421 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3424 /* A rule already exists with the new VSI being added */
3425 if (cur_handle == vsi_handle) {
3426 status = ICE_ERR_ALREADY_EXISTS;
/* Build a fresh two-entry VSI list: old VSI + the one being added. */
3430 vsi_handle_arr[0] = cur_handle;
3431 vsi_handle_arr[1] = vsi_handle;
3432 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3433 &vsi_list_id, lkup_type);
3437 tmp_fltr = v_list_itr->fltr_info;
3438 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3439 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3440 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3441 /* Update the previous switch rule to a new VSI list which
3442 * includes current VSI that is requested
3444 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3448 /* before overriding VSI list map info. decrement ref_cnt of
3451 v_list_itr->vsi_list_info->ref_cnt--;
3453 /* now update to newly created list */
3454 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3455 v_list_itr->vsi_list_info =
3456 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3458 v_list_itr->vsi_count++;
3462 ice_release_lock(rule_lock);
/* Iterates a caller-supplied VLAN filter list and installs each entry via
 * ice_add_vlan_internal, stopping (and returning) at the first failure.
 * Each entry's per-item status is also recorded in entry->status.
 */
3467 * ice_add_vlan_rule - Add VLAN based filter rule
3468 * @hw: pointer to the hardware structure
3469 * @v_list: list of VLAN entries and forwarding information
3470 * @sw: pointer to switch info struct for which function add rule
3472 static enum ice_status
3473 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3474 struct ice_switch_info *sw)
3476 struct ice_fltr_list_entry *v_list_itr;
3477 struct ice_sw_recipe *recp_list;
3479 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3480 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3482 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3483 return ICE_ERR_PARAM;
/* VLAN rules are installed as Tx-flagged filters. */
3484 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3485 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3487 if (v_list_itr->status)
3488 return v_list_itr->status;
/* Public wrapper around ice_add_vlan_rule using this HW's switch_info.
 * NOTE(review): the NULL-check condition line is elided in this excerpt.
 */
3494 * ice_add_vlan - Add a VLAN based filter rule
3495 * @hw: pointer to the hardware structure
3496 * @v_list: list of VLAN and forwarding information
3498 * Function add VLAN rule for logical port from HW struct
3501 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3504 return ICE_ERR_PARAM;
3506 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
/* Installs MAC+VLAN pair filters: validates lookup type per entry, forces
 * the Tx flag, and hands each entry to ice_add_rule_internal. Stops at the
 * first per-entry failure.
 */
3510 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3511 * @hw: pointer to the hardware structure
3512 * @mv_list: list of MAC and VLAN filters
3514 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3515 * pruning bits enabled, then it is the responsibility of the caller to make
3516 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3517 * VLAN won't be received on that VSI otherwise.
3520 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3522 struct ice_fltr_list_entry *mv_list_itr;
3523 struct ice_sw_recipe *recp_list;
3525 if (!mv_list || !hw)
3526 return ICE_ERR_PARAM;
3528 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3529 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3531 enum ice_sw_lkup_type l_type =
3532 mv_list_itr->fltr_info.lkup_type;
3534 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3535 return ICE_ERR_PARAM;
3536 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3537 mv_list_itr->status =
3538 ice_add_rule_internal(hw, recp_list,
3539 hw->port_info->lport,
3541 if (mv_list_itr->status)
3542 return mv_list_itr->status;
/* Installs ethertype / ethertype+MAC filters; unlike the VLAN paths the
 * caller supplies the Tx/Rx flag per entry. Per-entry recipe list is picked
 * by the entry's own lookup type.
 */
3548 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3549 * @hw: pointer to the hardware structure
3550 * @em_list: list of ether type MAC filter, MAC is optional
3551 * @sw: pointer to switch info struct for which function add rule
3552 * @lport: logic port number on which function add rule
3554 * This function requires the caller to populate the entries in
3555 * the filter list with the necessary fields (including flags to
3556 * indicate Tx or Rx rules).
3558 static enum ice_status
3559 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3560 struct ice_switch_info *sw, u8 lport)
3562 struct ice_fltr_list_entry *em_list_itr;
3564 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3566 struct ice_sw_recipe *recp_list;
3567 enum ice_sw_lkup_type l_type;
3569 l_type = em_list_itr->fltr_info.lkup_type;
/* NOTE(review): recp_list is indexed before l_type is validated below;
 * harmless as long as lkup_type is within recp_list bounds — confirm.
 */
3570 recp_list = &sw->recp_list[l_type];
3572 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3573 l_type != ICE_SW_LKUP_ETHERTYPE)
3574 return ICE_ERR_PARAM;
3576 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3579 if (em_list_itr->status)
3580 return em_list_itr->status;
/* Public wrapper: NULL-checks, then delegates to ice_add_eth_mac_rule with
 * this HW's switch_info and logical port.
 */
3587 * ice_add_eth_mac - Add a ethertype based filter rule
3588 * @hw: pointer to the hardware structure
3589 * @em_list: list of ethertype and forwarding information
3591 * Function add ethertype rule for logical port from HW struct
3593 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3595 if (!em_list || !hw)
3596 return ICE_ERR_PARAM;
3598 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3599 hw->port_info->lport);
/* Removes ethertype / ethertype+MAC filters. SAFE iteration is used because
 * ice_remove_rule_internal may unlink the entry being visited. Stops at the
 * first per-entry failure.
 */
3603 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3604 * @hw: pointer to the hardware structure
3605 * @em_list: list of ethertype or ethertype MAC entries
3606 * @sw: pointer to switch info struct for which function add rule
3608 static enum ice_status
3609 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3610 struct ice_switch_info *sw)
3612 struct ice_fltr_list_entry *em_list_itr, *tmp;
3614 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3616 struct ice_sw_recipe *recp_list;
3617 enum ice_sw_lkup_type l_type;
3619 l_type = em_list_itr->fltr_info.lkup_type;
3621 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3622 l_type != ICE_SW_LKUP_ETHERTYPE)
3623 return ICE_ERR_PARAM;
3625 recp_list = &sw->recp_list[l_type];
3626 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3628 if (em_list_itr->status)
3629 return em_list_itr->status;
/* Public wrapper: NULL-checks, then delegates removal to
 * ice_remove_eth_mac_rule with this HW's switch_info.
 */
3635 * ice_remove_eth_mac - remove a ethertype based filter rule
3636 * @hw: pointer to the hardware structure
3637 * @em_list: list of ethertype and forwarding information
3641 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3643 if (!em_list || !hw)
3644 return ICE_ERR_PARAM;
3646 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
/* Frees every management entry on a switch-rule list (software bookkeeping
 * only — no AQ commands are issued here). SAFE iteration because each entry
 * is deleted and freed in the loop body.
 */
3650 * ice_rem_sw_rule_info
3651 * @hw: pointer to the hardware structure
3652 * @rule_head: pointer to the switch list structure that we want to delete
3655 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3657 if (!LIST_EMPTY(rule_head)) {
3658 struct ice_fltr_mgmt_list_entry *entry;
3659 struct ice_fltr_mgmt_list_entry *tmp;
3661 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3662 ice_fltr_mgmt_list_entry, list_entry) {
3663 LIST_DEL(&entry->list_entry);
3664 ice_free(hw, entry);
/* Frees every advanced-rule management entry on the list, including each
 * entry's separately allocated lkups array (freed before the entry itself).
 */
3670 * ice_rem_adv_rule_info
3671 * @hw: pointer to the hardware structure
3672 * @rule_head: pointer to the switch list structure that we want to delete
3675 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3677 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3678 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3680 if (LIST_EMPTY(rule_head))
3683 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3684 ice_adv_fltr_mgmt_list_entry, list_entry) {
3685 LIST_DEL(&lst_itr->list_entry);
3686 ice_free(hw, lst_itr->lkups);
3687 ice_free(hw, lst_itr);
/* Walks every recipe and frees its filter-rule bookkeeping, dispatching to
 * the basic or advanced cleanup helper based on the recipe's adv_rule flag.
 */
3692 * ice_rem_all_sw_rules_info
3693 * @hw: pointer to the hardware structure
3695 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3697 struct ice_switch_info *sw = hw->switch_info;
3700 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3701 struct LIST_HEAD_TYPE *rule_head;
3703 rule_head = &sw->recp_list[i].filt_rules;
3704 if (!sw->recp_list[i].adv_rule)
3705 ice_rem_sw_rule_info(hw, rule_head);
/* else-branch for advanced rules (the "else" line is elided here). */
3707 ice_rem_adv_rule_info(hw, rule_head);
/* Adds or removes the default-VSI switch rule for a port. Builds an
 * ICE_SW_LKUP_DFLT ice_fltr_info, sends it via the add/remove SW-rules AQ
 * opcode, and on success caches (or invalidates) the per-direction default
 * VSI number and rule ID in the port_info.
 * NOTE(review): excerpt is line-sampled — the !set branch conditions, the
 * success/failure branch structure after the AQ call, and the exit label
 * are elided.
 */
3712 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3713 * @pi: pointer to the port_info structure
3714 * @vsi_handle: VSI handle to set as default
3715 * @set: true to add the above mentioned switch rule, false to remove it
3716 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3718 * add filter rule to set/unset given VSI as default VSI for the switch
3719 * (represented by swid)
3722 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3725 struct ice_aqc_sw_rules_elem *s_rule;
3726 struct ice_fltr_info f_info;
3727 struct ice_hw *hw = pi->hw;
3728 enum ice_adminq_opc opcode;
3729 enum ice_status status;
3733 if (!ice_is_vsi_valid(hw, vsi_handle))
3734 return ICE_ERR_PARAM;
3735 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add needs room for the dummy ethernet header; remove does not. */
3737 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3738 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3739 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3741 return ICE_ERR_NO_MEMORY;
3743 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3745 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3746 f_info.flag = direction;
3747 f_info.fltr_act = ICE_FWD_TO_VSI;
3748 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules are sourced from the logical port; Tx from the VSI. */
3750 if (f_info.flag & ICE_FLTR_RX) {
3751 f_info.src = pi->lport;
3752 f_info.src_id = ICE_SRC_ID_LPORT;
3754 f_info.fltr_rule_id =
3755 pi->dflt_rx_vsi_rule_id;
3756 } else if (f_info.flag & ICE_FLTR_TX) {
3757 f_info.src_id = ICE_SRC_ID_VSI;
3758 f_info.src = hw_vsi_id;
3760 f_info.fltr_rule_id =
3761 pi->dflt_tx_vsi_rule_id;
3765 opcode = ice_aqc_opc_add_sw_rules;
3767 opcode = ice_aqc_opc_remove_sw_rules;
3769 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3771 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3772 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Success path while setting: cache HW-returned rule index per direction. */
3775 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3777 if (f_info.flag & ICE_FLTR_TX) {
3778 pi->dflt_tx_vsi_num = hw_vsi_id;
3779 pi->dflt_tx_vsi_rule_id = index;
3780 } else if (f_info.flag & ICE_FLTR_RX) {
3781 pi->dflt_rx_vsi_num = hw_vsi_id;
3782 pi->dflt_rx_vsi_rule_id = index;
/* Clearing path: invalidate the cached default VSI state per direction. */
3785 if (f_info.flag & ICE_FLTR_TX) {
3786 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3787 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3788 } else if (f_info.flag & ICE_FLTR_RX) {
3789 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3790 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3795 ice_free(hw, s_rule);
/* Linear search for a unicast MAC rule matching the candidate's lookup
 * data, HW VSI ID, and flag. Caller must hold the recipe's filt_rule_lock.
 */
3800 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3801 * @list_head: head of rule list
3802 * @f_info: rule information
3804 * Helper function to search for a unicast rule entry - this is to be used
3805 * to remove unicast MAC filter that is not shared with other VSIs on the
3808 * Returns pointer to entry storing the rule if found
3810 static struct ice_fltr_mgmt_list_entry *
3811 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3812 struct ice_fltr_info *f_info)
3814 struct ice_fltr_mgmt_list_entry *list_itr;
3816 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3818 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3819 sizeof(f_info->l_data)) &&
3820 f_info->fwd_id.hw_vsi_id ==
3821 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3822 f_info->flag == list_itr->fltr_info.flag)
/* Match returns list_itr; fall-through returns NULL (lines elided). */
/* Removes MAC filter rules. For unicast addresses when unicast sharing is
 * disabled, first verifies (under the rule lock) that this exact rule exists
 * for this VSI, so a unicast address owned by another VSI is never removed.
 */
3829 * ice_remove_mac_rule - remove a MAC based filter rule
3830 * @hw: pointer to the hardware structure
3831 * @m_list: list of MAC addresses and forwarding information
3832 * @recp_list: list from which function remove MAC address
3834 * This function removes either a MAC filter rule or a specific VSI from a
3835 * VSI list for a multicast MAC address.
3837 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3838 * ice_add_mac. Caller should be aware that this call will only work if all
3839 * the entries passed into m_list were added previously. It will not attempt to
3840 * do a partial remove of entries that were found.
3842 static enum ice_status
3843 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3844 struct ice_sw_recipe *recp_list)
3846 struct ice_fltr_list_entry *list_itr, *tmp;
3847 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3850 return ICE_ERR_PARAM;
3852 rule_lock = &recp_list->filt_rule_lock;
3853 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3855 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3856 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3859 if (l_type != ICE_SW_LKUP_MAC)
3860 return ICE_ERR_PARAM;
3862 vsi_handle = list_itr->fltr_info.vsi_handle;
3863 if (!ice_is_vsi_valid(hw, vsi_handle))
3864 return ICE_ERR_PARAM;
3866 list_itr->fltr_info.fwd_id.hw_vsi_id =
3867 ice_get_hw_vsi_num(hw, vsi_handle);
3868 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3869 /* Don't remove the unicast address that belongs to
3870 * another VSI on the switch, since it is not being
3873 ice_acquire_lock(rule_lock);
3874 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3875 &list_itr->fltr_info)) {
3876 ice_release_lock(rule_lock);
3877 return ICE_ERR_DOES_NOT_EXIST;
3879 ice_release_lock(rule_lock);
3881 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3883 if (list_itr->status)
3884 return list_itr->status;
/* Public wrapper: picks the MAC recipe list from switch_info and delegates
 * removal to ice_remove_mac_rule.
 */
3890 * ice_remove_mac - remove a MAC address based filter rule
3891 * @hw: pointer to the hardware structure
3892 * @m_list: list of MAC addresses and forwarding information
3896 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3898 struct ice_sw_recipe *recp_list;
3900 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3901 return ice_remove_mac_rule(hw, m_list, recp_list);
/* Removes VLAN filter rules one entry at a time; SAFE iteration because
 * removal may unlink entries. Stops at the first per-entry failure.
 */
3905 * ice_remove_vlan_rule - Remove VLAN based filter rule
3906 * @hw: pointer to the hardware structure
3907 * @v_list: list of VLAN entries and forwarding information
3908 * @recp_list: list from which function remove VLAN
3910 static enum ice_status
3911 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3912 struct ice_sw_recipe *recp_list)
3914 struct ice_fltr_list_entry *v_list_itr, *tmp;
3916 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3918 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3920 if (l_type != ICE_SW_LKUP_VLAN)
3921 return ICE_ERR_PARAM;
3922 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3924 if (v_list_itr->status)
3925 return v_list_itr->status;
/* Public wrapper: NULL-checks (condition line elided in this excerpt), then
 * delegates to ice_remove_vlan_rule with the VLAN recipe list.
 */
3931 * ice_remove_vlan - remove a VLAN address based filter rule
3932 * @hw: pointer to the hardware structure
3933 * @v_list: list of VLAN and forwarding information
3937 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3939 struct ice_sw_recipe *recp_list;
3942 return ICE_ERR_PARAM;
3944 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3945 return ice_remove_vlan_rule(hw, v_list, recp_list);
/* Removes MAC+VLAN pair filters via ice_remove_rule_internal, validating
 * each entry's lookup type first. Stops at the first per-entry failure.
 */
3949 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3950 * @hw: pointer to the hardware structure
3951 * @v_list: list of MAC VLAN entries and forwarding information
3954 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3956 struct ice_fltr_list_entry *v_list_itr, *tmp;
3957 struct ice_sw_recipe *recp_list;
3960 return ICE_ERR_PARAM;
3962 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3963 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3965 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3967 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3968 return ICE_ERR_PARAM;
3969 v_list_itr->status =
3970 ice_remove_rule_internal(hw, recp_list,
3972 if (v_list_itr->status)
3973 return v_list_itr->status;
/* Predicate: true when the filter forwards directly to this VSI, or forwards
 * to a VSI list whose bitmap contains this VSI handle.
 */
3979 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3980 * @fm_entry: filter entry to inspect
3981 * @vsi_handle: VSI handle to compare with filter info
3984 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3986 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3987 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3988 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3989 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
/* Allocates a copy of a filter entry, rewrites its action/forwarding fields
 * to name the VSI it should be removed from, and appends it to the removal
 * list. Ownership of the allocation transfers to the caller, who frees it
 * after processing the list.
 */
3994 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3995 * @hw: pointer to the hardware structure
3996 * @vsi_handle: VSI handle to remove filters from
3997 * @vsi_list_head: pointer to the list to add entry to
3998 * @fi: pointer to fltr_info of filter entry to copy & add
4000 * Helper function, used when creating a list of filters to remove from
4001 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4002 * original filter entry, with the exception of fltr_info.fltr_act and
4003 * fltr_info.fwd_id fields. These are set such that later logic can
4004 * extract which VSI to remove the fltr from, and pass on that information.
4006 static enum ice_status
4007 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4008 struct LIST_HEAD_TYPE *vsi_list_head,
4009 struct ice_fltr_info *fi)
4011 struct ice_fltr_list_entry *tmp;
4013 /* this memory is freed up in the caller function
4014 * once filters for this VSI are removed
4016 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4018 return ICE_ERR_NO_MEMORY;
4020 tmp->fltr_info = *fi;
4022 /* Overwrite these fields to indicate which VSI to remove filter from,
4023 * so find and remove logic can extract the information from the
4024 * list entries. Note that original entries will still have proper
4027 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4028 tmp->fltr_info.vsi_handle = vsi_handle;
4029 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4031 LIST_ADD(&tmp->list_entry, vsi_list_head);
/* Scans one lookup-type rule list and copies every rule used by the given
 * VSI onto vsi_list_head (via ice_add_entry_to_vsi_fltr_list). Copies are
 * heap-allocated; the caller must free them after use.
 */
4037 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4038 * @hw: pointer to the hardware structure
4039 * @vsi_handle: VSI handle to remove filters from
4040 * @lkup_list_head: pointer to the list that has certain lookup type filters
4041 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4043 * Locates all filters in lkup_list_head that are used by the given VSI,
4044 * and adds COPIES of those entries to vsi_list_head (intended to be used
4045 * to remove the listed filters).
4046 * Note that this means all entries in vsi_list_head must be explicitly
4047 * deallocated by the caller when done with list.
4049 static enum ice_status
4050 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4051 struct LIST_HEAD_TYPE *lkup_list_head,
4052 struct LIST_HEAD_TYPE *vsi_list_head)
4054 struct ice_fltr_mgmt_list_entry *fm_entry;
4055 enum ice_status status = ICE_SUCCESS;
4057 /* check to make sure VSI ID is valid and within boundary */
4058 if (!ice_is_vsi_valid(hw, vsi_handle))
4059 return ICE_ERR_PARAM;
4061 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4062 ice_fltr_mgmt_list_entry, list_entry) {
4063 struct ice_fltr_info *fi;
4065 fi = &fm_entry->fltr_info;
/* Skip filters not referencing this VSI (continue statement elided). */
4066 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4069 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* Maps a filter's direction (Tx/Rx) and DA class (broadcast / multicast /
 * unicast / otherwise VLAN) onto the corresponding ICE_PROMISC_* bit.
 */
4078 * ice_determine_promisc_mask
4079 * @fi: filter info to parse
4081 * Helper function to determine which ICE_PROMISC_ mask corresponds
4082 * to given filter into.
4084 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4086 u16 vid = fi->l_data.mac_vlan.vlan_id;
4087 u8 *macaddr = fi->l_data.mac.mac_addr;
4088 bool is_tx_fltr = false;
4089 u8 promisc_mask = 0;
/* Tx filters select the *_TX variants below (assignment line elided). */
4091 if (fi->flag == ICE_FLTR_TX)
4094 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4095 promisc_mask |= is_tx_fltr ?
4096 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4097 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4098 promisc_mask |= is_tx_fltr ?
4099 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4100 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4101 promisc_mask |= is_tx_fltr ?
4102 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* VLAN case presumably gated on vid != 0 — condition elided; confirm. */
4104 promisc_mask |= is_tx_fltr ?
4105 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4107 return promisc_mask;
/* Accumulates the promiscuous mask for a VSI by OR-ing the mask of every
 * PROMISC rule the VSI participates in. Rule list is walked under its lock.
 */
4111 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4112 * @hw: pointer to the hardware structure
4113 * @vsi_handle: VSI handle to retrieve info from
4114 * @promisc_mask: pointer to mask to be filled in
4115 * @vid: VLAN ID of promisc VLAN VSI
4118 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4121 struct ice_switch_info *sw = hw->switch_info;
4122 struct ice_fltr_mgmt_list_entry *itr;
4123 struct LIST_HEAD_TYPE *rule_head;
4124 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4126 if (!ice_is_vsi_valid(hw, vsi_handle))
4127 return ICE_ERR_PARAM;
4131 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4132 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4134 ice_acquire_lock(rule_lock);
4135 LIST_FOR_EACH_ENTRY(itr, rule_head,
4136 ice_fltr_mgmt_list_entry, list_entry) {
4137 /* Continue if this filter doesn't apply to this VSI or the
4138 * VSI ID is not in the VSI map for this filter
4140 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4143 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4145 ice_release_lock(rule_lock);
/* Same accumulation as ice_get_vsi_promisc but over the PROMISC_VLAN
 * recipe's rule list.
 */
4151 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4152 * @hw: pointer to the hardware structure
4153 * @vsi_handle: VSI handle to retrieve info from
4154 * @promisc_mask: pointer to mask to be filled in
4155 * @vid: VLAN ID of promisc VLAN VSI
4158 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4161 struct ice_switch_info *sw = hw->switch_info;
4162 struct ice_fltr_mgmt_list_entry *itr;
4163 struct LIST_HEAD_TYPE *rule_head;
4164 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4166 if (!ice_is_vsi_valid(hw, vsi_handle))
4167 return ICE_ERR_PARAM;
4171 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4172 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4174 ice_acquire_lock(rule_lock);
4175 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4177 /* Continue if this filter doesn't apply to this VSI or the
4178 * VSI ID is not in the VSI map for this filter
4180 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4183 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4185 ice_release_lock(rule_lock);
/* Removes each promiscuous filter entry through the common removal path for
 * the given recipe ID. Stops at the first per-entry failure.
 */
4191 * ice_remove_promisc - Remove promisc based filter rules
4192 * @hw: pointer to the hardware structure
4193 * @recp_id: recipe ID for which the rule needs to removed
4194 * @v_list: list of promisc entries
4196 static enum ice_status
4197 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4198 struct LIST_HEAD_TYPE *v_list)
4200 struct ice_fltr_list_entry *v_list_itr, *tmp;
4201 struct ice_sw_recipe *recp_list;
4203 recp_list = &hw->switch_info->recp_list[recp_id];
4204 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4206 v_list_itr->status =
4207 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4208 if (v_list_itr->status)
4209 return v_list_itr->status;
/* Clears promiscuous mode bits for a VSI: collects (under the rule lock)
 * copies of all matching PROMISC / PROMISC_VLAN rules whose mask is fully
 * covered by promisc_mask, removes them, then frees the temporary copies.
 */
4215 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4216 * @hw: pointer to the hardware structure
4217 * @vsi_handle: VSI handle to clear mode
4218 * @promisc_mask: mask of promiscuous config bits to clear
4219 * @vid: VLAN ID to clear VLAN promiscuous
4222 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4225 struct ice_switch_info *sw = hw->switch_info;
4226 struct ice_fltr_list_entry *fm_entry, *tmp;
4227 struct LIST_HEAD_TYPE remove_list_head;
4228 struct ice_fltr_mgmt_list_entry *itr;
4229 struct LIST_HEAD_TYPE *rule_head;
4230 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4231 enum ice_status status = ICE_SUCCESS;
4234 if (!ice_is_vsi_valid(hw, vsi_handle))
4235 return ICE_ERR_PARAM;
/* VLAN promisc bits select the PROMISC_VLAN recipe; otherwise PROMISC. */
4237 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4238 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4240 recipe_id = ICE_SW_LKUP_PROMISC;
4242 rule_head = &sw->recp_list[recipe_id].filt_rules;
4243 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4245 INIT_LIST_HEAD(&remove_list_head);
4247 ice_acquire_lock(rule_lock);
4248 LIST_FOR_EACH_ENTRY(itr, rule_head,
4249 ice_fltr_mgmt_list_entry, list_entry) {
4250 struct ice_fltr_info *fltr_info;
4251 u8 fltr_promisc_mask = 0;
4253 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4255 fltr_info = &itr->fltr_info;
4257 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4258 vid != fltr_info->l_data.mac_vlan.vlan_id)
4261 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4263 /* Skip if filter is not completely specified by given mask */
4264 if (fltr_promisc_mask & ~promisc_mask)
4267 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4271 ice_release_lock(rule_lock);
4272 goto free_fltr_list;
4275 ice_release_lock(rule_lock);
4277 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Free every temporary copy regardless of removal outcome. */
4280 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4281 ice_fltr_list_entry, list_entry) {
4282 LIST_DEL(&fm_entry->list_entry);
4283 ice_free(hw, fm_entry);
/* Enables promiscuous mode(s) on a VSI. Peels one direction/packet-type bit
 * off promisc_mask per loop iteration, synthesizes a matching filter (DA,
 * direction flag, source) and installs it via the common add path.
 * NOTE(review): excerpt is line-sampled — the loop's is_tx_fltr bookkeeping,
 * some branch lines and the exit label are elided.
 */
4290 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4291 * @hw: pointer to the hardware structure
4292 * @vsi_handle: VSI handle to configure
4293 * @promisc_mask: mask of promiscuous config bits
4294 * @vid: VLAN ID to set VLAN promiscuous
4297 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4299 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4300 struct ice_fltr_list_entry f_list_entry;
4301 struct ice_fltr_info new_fltr;
4302 enum ice_status status = ICE_SUCCESS;
4308 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4310 if (!ice_is_vsi_valid(hw, vsi_handle))
4311 return ICE_ERR_PARAM;
4312 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4314 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4316 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4317 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4318 new_fltr.l_data.mac_vlan.vlan_id = vid;
4319 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4321 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4322 recipe_id = ICE_SW_LKUP_PROMISC;
4325 /* Separate filters must be set for each direction/packet type
4326 * combination, so we will loop over the mask value, store the
4327 * individual type, and clear it out in the input mask as it
4330 while (promisc_mask) {
4331 struct ice_sw_recipe *recp_list;
4337 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4338 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4339 pkt_type = UCAST_FLTR;
4340 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4341 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4342 pkt_type = UCAST_FLTR;
4344 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4345 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4346 pkt_type = MCAST_FLTR;
4347 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4348 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4349 pkt_type = MCAST_FLTR;
4351 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4352 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4353 pkt_type = BCAST_FLTR;
4354 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4355 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4356 pkt_type = BCAST_FLTR;
4360 /* Check for VLAN promiscuous flag */
4361 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4362 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4363 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4364 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4368 /* Set filter DA based on packet type */
4369 mac_addr = new_fltr.l_data.mac.mac_addr;
4370 if (pkt_type == BCAST_FLTR) {
4371 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4372 } else if (pkt_type == MCAST_FLTR ||
4373 pkt_type == UCAST_FLTR) {
4374 /* Use the dummy ether header DA */
4375 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4376 ICE_NONDMA_TO_NONDMA);
4377 if (pkt_type == MCAST_FLTR)
4378 mac_addr[0] |= 0x1; /* Set multicast bit */
4381 /* Need to reset this to zero for all iterations */
/* Tx filters source from the VSI; Rx filters from the logical port. */
4384 new_fltr.flag |= ICE_FLTR_TX;
4385 new_fltr.src = hw_vsi_id;
4387 new_fltr.flag |= ICE_FLTR_RX;
4388 new_fltr.src = hw->port_info->lport;
4391 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4392 new_fltr.vsi_handle = vsi_handle;
4393 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4394 f_list_entry.fltr_info = new_fltr;
4395 recp_list = &hw->switch_info->recp_list[recipe_id];
4397 status = ice_add_rule_internal(hw, recp_list,
4398 hw->port_info->lport,
4400 if (status != ICE_SUCCESS)
4401 goto set_promisc_exit;
/* Applies (or clears) promiscuous mode per VLAN: snapshots this VSI's VLAN
 * filters under the VLAN rule lock, then sets/clears promisc for each VLAN
 * ID found, finally freeing the temporary snapshot entries.
 */
4409 * ice_set_vlan_vsi_promisc
4410 * @hw: pointer to the hardware structure
4411 * @vsi_handle: VSI handle to configure
4412 * @promisc_mask: mask of promiscuous config bits
4413 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4415 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4418 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4419 bool rm_vlan_promisc)
4421 struct ice_switch_info *sw = hw->switch_info;
4422 struct ice_fltr_list_entry *list_itr, *tmp;
4423 struct LIST_HEAD_TYPE vsi_list_head;
4424 struct LIST_HEAD_TYPE *vlan_head;
4425 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4426 enum ice_status status;
4429 INIT_LIST_HEAD(&vsi_list_head);
4430 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4431 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4432 ice_acquire_lock(vlan_lock);
4433 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4435 ice_release_lock(vlan_lock);
4437 goto free_fltr_list;
4439 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4441 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4442 if (rm_vlan_promisc)
4443 status = ice_clear_vsi_promisc(hw, vsi_handle,
4444 promisc_mask, vlan_id);
4446 status = ice_set_vsi_promisc(hw, vsi_handle,
4447 promisc_mask, vlan_id);
/* Snapshot entries are heap copies; always freed here. */
4453 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4454 ice_fltr_list_entry, list_entry) {
4455 LIST_DEL(&list_itr->list_entry);
4456 ice_free(hw, list_itr);
/* Removes all of a VSI's filters of one lookup type: snapshots the matching
 * rules under the lock, dispatches to the type-specific removal routine,
 * then frees the snapshot entries.
 */
4462 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4463 * @hw: pointer to the hardware structure
4464 * @vsi_handle: VSI handle to remove filters from
4465 * @recp_list: recipe list from which function remove fltr
4466 * @lkup: switch rule filter lookup type
4469 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4470 struct ice_sw_recipe *recp_list,
4471 enum ice_sw_lkup_type lkup)
4473 struct ice_fltr_list_entry *fm_entry;
4474 struct LIST_HEAD_TYPE remove_list_head;
4475 struct LIST_HEAD_TYPE *rule_head;
4476 struct ice_fltr_list_entry *tmp;
4477 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4478 enum ice_status status;
4480 INIT_LIST_HEAD(&remove_list_head);
4481 rule_lock = &recp_list[lkup].filt_rule_lock;
4482 rule_head = &recp_list[lkup].filt_rules;
4483 ice_acquire_lock(rule_lock);
4484 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4486 ice_release_lock(rule_lock);
/* switch (lkup) — the switch statement line itself is elided here. */
4491 case ICE_SW_LKUP_MAC:
4492 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4494 case ICE_SW_LKUP_VLAN:
4495 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4497 case ICE_SW_LKUP_PROMISC:
4498 case ICE_SW_LKUP_PROMISC_VLAN:
4499 ice_remove_promisc(hw, lkup, &remove_list_head);
4501 case ICE_SW_LKUP_MAC_VLAN:
4502 ice_remove_mac_vlan(hw, &remove_list_head);
4504 case ICE_SW_LKUP_ETHERTYPE:
4505 case ICE_SW_LKUP_ETHERTYPE_MAC:
4506 ice_remove_eth_mac(hw, &remove_list_head);
4508 case ICE_SW_LKUP_DFLT:
4509 ice_debug(hw, ICE_DBG_SW,
4510 "Remove filters for this lookup type hasn't been implemented yet\n");
4512 case ICE_SW_LKUP_LAST:
4513 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Free the snapshot copies regardless of which case ran. */
4517 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4518 ice_fltr_list_entry, list_entry) {
4519 LIST_DEL(&fm_entry->list_entry);
4520 ice_free(hw, fm_entry);
4525 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4526 * @hw: pointer to the hardware structure
4527 * @vsi_handle: VSI handle to remove filters from
4528 * @sw: pointer to switch info struct
/* Removes the VSI's filters for every supported lookup type by invoking
 * ice_remove_vsi_lkup_fltr() once per type.
 */
4531 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4532 struct ice_switch_info *sw)
4534 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4536 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4537 sw->recp_list, ICE_SW_LKUP_MAC);
4538 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4539 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4540 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4541 sw->recp_list, ICE_SW_LKUP_PROMISC);
4542 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4543 sw->recp_list, ICE_SW_LKUP_VLAN);
4544 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4545 sw->recp_list, ICE_SW_LKUP_DFLT);
4546 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4547 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4548 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4549 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4550 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4551 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4555 * ice_remove_vsi_fltr - Remove all filters for a VSI
4556 * @hw: pointer to the hardware structure
4557 * @vsi_handle: VSI handle to remove filters from
/* Thin public wrapper: removes all filters using the HW's own switch_info */
4559 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4561 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4565 * ice_alloc_res_cntr - allocating resource counter
4566 * @hw: pointer to the hardware structure
4567 * @type: type of resource
4568 * @alloc_shared: if set it is shared else dedicated
4569 * @num_items: number of entries requested for FD resource type
4570 * @counter_id: counter index returned by AQ call
/* Allocates a HW resource counter through the admin queue alloc-resources
 * command and returns the counter index chosen by firmware.
 */
4573 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4576 struct ice_aqc_alloc_free_res_elem *buf;
4577 enum ice_status status;
4580 /* Allocate resource */
4581 buf_len = sizeof(*buf);
4582 buf = (struct ice_aqc_alloc_free_res_elem *)
4583 ice_malloc(hw, buf_len);
4585 return ICE_ERR_NO_MEMORY;
/* res_type encodes the resource type plus the shared/dedicated flag */
4587 buf->num_elems = CPU_TO_LE16(num_items);
4588 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4589 ICE_AQC_RES_TYPE_M) | alloc_shared);
4591 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4592 ice_aqc_opc_alloc_res, NULL);
/* On success FW writes the allocated counter index into the response */
4596 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4604 * ice_free_res_cntr - free resource counter
4605 * @hw: pointer to the hardware structure
4606 * @type: type of resource
4607 * @alloc_shared: if set it is shared else dedicated
4608 * @num_items: number of entries to be freed for FD resource type
4609 * @counter_id: counter ID resource which needs to be freed
/* Releases a previously allocated HW resource counter through the admin
 * queue free-resources command; failure is only logged, not retried.
 */
4612 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4615 struct ice_aqc_alloc_free_res_elem *buf;
4616 enum ice_status status;
4620 buf_len = sizeof(*buf);
4621 buf = (struct ice_aqc_alloc_free_res_elem *)
4622 ice_malloc(hw, buf_len);
4624 return ICE_ERR_NO_MEMORY;
/* res_type mirrors the encoding used at allocation time */
4626 buf->num_elems = CPU_TO_LE16(num_items);
4627 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4628 ICE_AQC_RES_TYPE_M) | alloc_shared);
4629 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4631 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4632 ice_aqc_opc_free_res, NULL);
4634 ice_debug(hw, ICE_DBG_SW,
4635 "counter resource could not be freed\n");
4642 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4643 * @hw: pointer to the hardware structure
4644 * @counter_id: returns counter index
/* Convenience wrapper: one dedicated VLAN-type counter */
4646 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4648 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4649 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4654 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4655 * @hw: pointer to the hardware structure
4656 * @counter_id: counter index to be freed
/* Convenience wrapper: frees one dedicated VLAN-type counter */
4658 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4660 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4661 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4666 * ice_alloc_res_lg_act - add large action resource
4667 * @hw: pointer to the hardware structure
4668 * @l_id: large action ID to fill it in
4669 * @num_acts: number of actions to hold with a large action entry
/* Allocates a wide-table entry big enough to hold num_acts actions and
 * returns its index in *l_id.
 */
4671 static enum ice_status
4672 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4674 struct ice_aqc_alloc_free_res_elem *sw_buf;
4675 enum ice_status status;
/* Reject zero or more actions than a large action entry can hold */
4678 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4679 return ICE_ERR_PARAM;
4681 /* Allocate resource for large action */
4682 buf_len = sizeof(*sw_buf);
4683 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4684 ice_malloc(hw, buf_len);
4686 return ICE_ERR_NO_MEMORY;
4688 sw_buf->num_elems = CPU_TO_LE16(1);
4690 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4691 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4692 * If num_acts is greater than 2, then use
4693 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4694 * The num_acts cannot exceed 4. This was ensured at the
4695 * beginning of the function.
4698 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4699 else if (num_acts == 2)
4700 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4702 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4704 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4705 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated wide-table index in the response element */
4707 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4709 ice_free(hw, sw_buf);
4714 * ice_add_mac_with_sw_marker - add filter with sw marker
4715 * @hw: pointer to the hardware structure
4716 * @f_info: filter info structure containing the MAC filter information
4717 * @sw_marker: sw marker to tag the Rx descriptor with
/* Adds (or reuses) a MAC forwarding filter and attaches a software-marker
 * large action to it. A rule may carry either a marker or a counter large
 * action, never both.
 */
4720 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4723 struct ice_fltr_mgmt_list_entry *m_entry;
4724 struct ice_fltr_list_entry fl_info;
4725 struct ice_sw_recipe *recp_list;
4726 struct LIST_HEAD_TYPE l_head;
4727 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4728 enum ice_status ret;
/* Validate: only forward-to-VSI MAC filters with a real marker and VSI */
4732 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4733 return ICE_ERR_PARAM;
4735 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4736 return ICE_ERR_PARAM;
4738 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4739 return ICE_ERR_PARAM;
4741 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4742 return ICE_ERR_PARAM;
4743 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4745 /* Add filter if it doesn't exist so then the adding of large
4746 * action always results in update
4749 INIT_LIST_HEAD(&l_head);
4750 fl_info.fltr_info = *f_info;
4751 LIST_ADD(&fl_info.list_entry, &l_head);
4753 entry_exists = false;
4754 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4755 hw->port_info->lport);
4756 if (ret == ICE_ERR_ALREADY_EXISTS)
4757 entry_exists = true;
4761 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4762 rule_lock = &recp_list->filt_rule_lock;
4763 ice_acquire_lock(rule_lock);
4764 /* Get the book keeping entry for the filter */
4765 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4769 /* If counter action was enabled for this rule then don't enable
4770 * sw marker large action
4772 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4773 ret = ICE_ERR_PARAM;
4777 /* if same marker was added before */
4778 if (m_entry->sw_marker_id == sw_marker) {
4779 ret = ICE_ERR_ALREADY_EXISTS;
4783 /* Allocate a hardware table entry to hold large act. Three actions
4784 * for marker based large action
4786 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4790 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4793 /* Update the switch rule to add the marker action */
4794 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4796 ice_release_lock(rule_lock);
4801 ice_release_lock(rule_lock);
4802 /* only remove entry if it did not exist previously */
4804 ret = ice_remove_mac(hw, &l_head);
4810 * ice_add_mac_with_counter - add filter with counter enabled
4811 * @hw: pointer to the hardware structure
4812 * @f_info: pointer to filter info structure containing the MAC filter
/* Adds (or reuses) a MAC forwarding filter and attaches a VLAN-counter
 * large action to it. Mutually exclusive with sw-marker large actions.
 */
4816 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4818 struct ice_fltr_mgmt_list_entry *m_entry;
4819 struct ice_fltr_list_entry fl_info;
4820 struct ice_sw_recipe *recp_list;
4821 struct LIST_HEAD_TYPE l_head;
4822 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4823 enum ice_status ret;
/* Validate: only forward-to-VSI MAC filters on a valid VSI */
4828 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4829 return ICE_ERR_PARAM;
4831 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4832 return ICE_ERR_PARAM;
4834 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4835 return ICE_ERR_PARAM;
4836 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4837 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4839 entry_exist = false;
4841 rule_lock = &recp_list->filt_rule_lock;
4843 /* Add filter if it doesn't exist so then the adding of large
4844 * action always results in update
4846 INIT_LIST_HEAD(&l_head);
4848 fl_info.fltr_info = *f_info;
4849 LIST_ADD(&fl_info.list_entry, &l_head);
4851 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4852 hw->port_info->lport);
4853 if (ret == ICE_ERR_ALREADY_EXISTS)
/* Look up the bookkeeping entry for the (now existing) filter */
4858 ice_acquire_lock(rule_lock);
4859 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4861 ret = ICE_ERR_BAD_PTR;
4865 /* Don't enable counter for a filter for which sw marker was enabled */
4866 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4867 ret = ICE_ERR_PARAM;
4871 /* If a counter was already enabled then don't need to add again */
4872 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4873 ret = ICE_ERR_ALREADY_EXISTS;
4877 /* Allocate a hardware table entry to VLAN counter */
4878 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4882 /* Allocate a hardware table entry to hold large act. Two actions for
4883 * counter based large action
4885 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4889 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4892 /* Update the switch rule to add the counter action */
4893 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4895 ice_release_lock(rule_lock);
4900 ice_release_lock(rule_lock);
4901 /* only remove entry if it did not exist previously */
4903 ret = ice_remove_mac(hw, &l_head);
4908 /* This is mapping table entry that maps every word within a given protocol
4909 * structure to the real byte offset as per the specification of that
4911 * for example dst address is 3 words in ethertype header and corresponding
4912 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4913 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4914 * matching entry describing its field. This needs to be updated if new
4915 * structure is added to that union.
/* Per-protocol list of 16-bit-word byte offsets within the packet header.
 * Indexed by enum ice_protocol_type (must cover ICE_PROTOCOL_LAST entries).
 */
4917 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4918 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4919 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4920 { ICE_ETYPE_OL, { 0 } },
4921 { ICE_VLAN_OFOS, { 0, 2 } },
4922 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4923 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4924 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4925 26, 28, 30, 32, 34, 36, 38 } },
4926 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4927 26, 28, 30, 32, 34, 36, 38 } },
4928 { ICE_TCP_IL, { 0, 2 } },
4929 { ICE_UDP_OF, { 0, 2 } },
4930 { ICE_UDP_ILOS, { 0, 2 } },
4931 { ICE_SCTP_IL, { 0, 2 } },
4932 { ICE_VXLAN, { 8, 10, 12, 14 } },
4933 { ICE_GENEVE, { 8, 10, 12, 14 } },
4934 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4935 { ICE_NVGRE, { 0, 2, 4, 6 } },
4936 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4937 { ICE_PPPOE, { 0, 2, 4, 6 } },
4940 /* The following table describes preferred grouping of recipes.
4941 * If a recipe that needs to be programmed is a superset or matches one of the
4942 * following combinations, then the recipe needs to be chained as per the
/* Maps each software protocol type to the HW protocol ID used in field
 * vectors. Note several tunnel types (VXLAN/GENEVE/VXLAN_GPE/GTP) share
 * ICE_UDP_OF_HW since they are matched inside the outer UDP header.
 * NOTE(review): ice_fill_valid_words() indexes this table directly by enum
 * value, so entry order must match enum ice_protocol_type — verify when
 * adding entries.
 */
4946 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4947 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4948 { ICE_MAC_IL, ICE_MAC_IL_HW },
4949 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4950 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4951 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4952 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4953 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4954 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4955 { ICE_TCP_IL, ICE_TCP_IL_HW },
4956 { ICE_UDP_OF, ICE_UDP_OF_HW },
4957 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4958 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4959 { ICE_VXLAN, ICE_UDP_OF_HW },
4960 { ICE_GENEVE, ICE_UDP_OF_HW },
4961 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4962 { ICE_NVGRE, ICE_GRE_OF_HW },
4963 { ICE_GTP, ICE_UDP_OF_HW },
4964 { ICE_PPPOE, ICE_PPPOE_HW },
4968 * ice_find_recp - find a recipe
4969 * @hw: pointer to the hardware structure
4970 * @lkup_exts: extension sequence to match
4972 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
/* A recipe matches when it extracts the same set of (protocol ID, offset)
 * words as lkup_exts, regardless of word order.
 */
4974 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4976 bool refresh_required = true;
4977 struct ice_sw_recipe *recp;
4980 /* Walk through existing recipes to find a match */
4981 recp = hw->switch_info->recp_list;
4982 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4983 /* If recipe was not created for this ID, in SW bookkeeping,
4984 * check if FW has an entry for this recipe. If the FW has an
4985 * entry update it in our SW bookkeeping and continue with the
4988 if (!recp[i].recp_created)
4989 if (ice_get_recp_frm_fw(hw,
4990 hw->switch_info->recp_list, i,
4994 /* Skip inverse action recipes */
4995 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4996 ICE_AQ_RECIPE_ACT_INV_ACT)
4999 /* if number of words we are looking for match */
5000 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5001 struct ice_fv_word *a = lkup_exts->fv_words;
5002 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive set comparison: every requested word must
 * appear somewhere in the candidate recipe's word list.
 */
5006 for (p = 0; p < lkup_exts->n_val_words; p++) {
5007 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5009 if (a[p].off == b[q].off &&
5010 a[p].prot_id == b[q].prot_id)
5011 /* Found the "p"th word in the
5016 /* After walking through all the words in the
5017 * "i"th recipe if "p"th word was not found then
5018 * this recipe is not what we are looking for.
5019 * So break out from this loop and try the next
5022 if (q >= recp[i].lkup_exts.n_val_words) {
5027 /* If for "i"th recipe the found was never set to false
5028 * then it means we found our match
5031 return i; /* Return the recipe ID */
5034 return ICE_MAX_NUM_RECIPES;
5038 * ice_prot_type_to_id - get protocol ID from protocol type
5039 * @type: protocol type
5040 * @id: pointer to variable that will receive the ID
5042 * Returns true if found, false otherwise
/* Linear search of ice_prot_id_tbl; table is small so O(n) is fine */
5044 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5048 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5049 if (ice_prot_id_tbl[i].type == type) {
5050 *id = ice_prot_id_tbl[i].protocol_id;
5057 * ice_find_valid_words - count valid words
5058 * @rule: advanced rule with lookup information
5059 * @lkup_exts: byte offset extractions of the words that are valid
5061 * calculate valid words in a lookup rule using mask value
/* Appends one fv_word per non-zero 16-bit mask word in the rule to
 * lkup_exts and returns how many words were added.
 */
5064 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5065 struct ice_prot_lkup_ext *lkup_exts)
5067 u8 j, word, prot_id, ret_val;
5069 if (!ice_prot_type_to_id(rule->type, &prot_id))
5072 word = lkup_exts->n_val_words;
/* Scan the match-mask union word by word; only masked words matter */
5074 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5075 if (((u16 *)&rule->m_u)[j] &&
5076 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5077 /* No more space to accommodate */
5078 if (word >= ICE_MAX_CHAIN_WORDS)
5080 lkup_exts->fv_words[word].off =
5081 ice_prot_ext[rule->type].offs[j];
/* NOTE(review): indexes ice_prot_id_tbl by rule->type rather than
 * using prot_id resolved above — relies on table order matching
 * enum ice_protocol_type; confirm against the table definition.
 */
5082 lkup_exts->fv_words[word].prot_id =
5083 ice_prot_id_tbl[rule->type].protocol_id;
5084 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
5088 ret_val = word - lkup_exts->n_val_words;
5089 lkup_exts->n_val_words = word;
5095 * ice_create_first_fit_recp_def - Create a recipe grouping
5096 * @hw: pointer to the hardware structure
5097 * @lkup_exts: an array of protocol header extractions
5098 * @rg_list: pointer to a list that stores new recipe groups
5099 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5101 * Using first fit algorithm, take all the words that are still not done
5102 * and start grouping them in 4-word groups. Each group makes up one
/* Each new group entry is heap-allocated and linked into rg_list; caller
 * owns and frees the list entries.
 */
5105 static enum ice_status
5106 ice_create_first_fit_recp_def(struct ice_hw *hw,
5107 struct ice_prot_lkup_ext *lkup_exts,
5108 struct LIST_HEAD_TYPE *rg_list,
5111 struct ice_pref_recipe_group *grp = NULL;
5116 /* Walk through every word in the rule to check if it is not done. If so
5117 * then this word needs to be part of a new recipe.
5119 for (j = 0; j < lkup_exts->n_val_words; j++)
5120 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none is open or the current one is full */
5122 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5123 struct ice_recp_grp_entry *entry;
5125 entry = (struct ice_recp_grp_entry *)
5126 ice_malloc(hw, sizeof(*entry));
5128 return ICE_ERR_NO_MEMORY;
5129 LIST_ADD(&entry->l_entry, rg_list);
5130 grp = &entry->r_group;
/* Copy the word's protocol/offset/mask into the open group */
5134 grp->pairs[grp->n_val_pairs].prot_id =
5135 lkup_exts->fv_words[j].prot_id;
5136 grp->pairs[grp->n_val_pairs].off =
5137 lkup_exts->fv_words[j].off;
5138 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5146 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5147 * @hw: pointer to the hardware structure
5148 * @fv_list: field vector with the extraction sequence information
5149 * @rg_list: recipe groupings with protocol-offset pairs
5151 * Helper function to fill in the field vector indices for protocol-offset
5152 * pairs. These indexes are then ultimately programmed into a recipe.
5154 static enum ice_status
5155 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5156 struct LIST_HEAD_TYPE *rg_list)
5158 struct ice_sw_fv_list_entry *fv;
5159 struct ice_recp_grp_entry *rg;
5160 struct ice_fv_word *fv_ext;
5162 if (LIST_EMPTY(fv_list))
/* Only the first field vector in the list is consulted here */
5165 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5166 fv_ext = fv->fv_ptr->ew;
5168 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5171 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5172 struct ice_fv_word *pr;
5177 pr = &rg->r_group.pairs[i];
5178 mask = rg->r_group.mask[i];
/* Locate the extraction-word slot matching this proto/offset */
5180 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5181 if (fv_ext[j].prot_id == pr->prot_id &&
5182 fv_ext[j].off == pr->off) {
5185 /* Store index of field vector */
5187 /* Mask is given by caller as big
5188 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask (BE -> LE) */
5191 rg->fv_mask[i] = mask << 8 | mask >> 8;
5195 /* Protocol/offset could not be found, caller gave an
5199 return ICE_ERR_PARAM;
5207 * ice_find_free_recp_res_idx - find free result indexes for recipe
5208 * @hw: pointer to hardware structure
5209 * @profiles: bitmap of profiles that will be associated with the new recipe
5210 * @free_idx: pointer to variable to receive the free index bitmap
5212 * The algorithm used here is:
5213 * 1. When creating a new recipe, create a set P which contains all
5214 * Profiles that will be associated with our new recipe
5216 * 2. For each Profile p in set P:
5217 * a. Add all recipes associated with Profile p into set R
5218 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5219 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5220 * i. Or just assume they all have the same possible indexes:
5222 * i.e., PossibleIndexes = 0x0000F00000000000
5224 * 3. For each Recipe r in set R:
5225 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5226 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5228 * FreeIndexes will contain the bits indicating the indexes free for use,
5229 * then the code needs to update the recipe[r].used_result_idx_bits to
5230 * indicate which indexes were selected for use by this recipe.
5233 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5234 ice_bitmap_t *free_idx)
5236 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5237 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5238 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
/* Start with clean working bitmaps */
5242 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5243 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5244 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5245 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
/* Initially assume every field-vector word index is possible */
5247 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5248 ice_set_bit(count, possible_idx);
5250 /* For each profile we are going to associate the recipe with, add the
5251 * recipes that are associated with that profile. This will give us
5252 * the set of recipes that our recipe may collide with. Also, determine
5253 * what possible result indexes are usable given this set of profiles.
5256 while (ICE_MAX_NUM_PROFILES >
5257 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5258 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5259 ICE_MAX_NUM_RECIPES);
5260 ice_and_bitmap(possible_idx, possible_idx,
5261 hw->switch_info->prof_res_bm[bit],
5266 /* For each recipe that our new recipe may collide with, determine
5267 * which indexes have been used.
5269 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5270 if (ice_is_bit_set(recipes, bit)) {
5271 ice_or_bitmap(used_idx, used_idx,
5272 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
5276 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5278 /* return number of free indexes */
5281 while (ICE_MAX_FV_WORDS >
5282 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5291 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5292 * @hw: pointer to hardware structure
5293 * @rm: recipe management list entry
5294 * @match_tun: if field vector index for tunnel needs to be programmed
5295 * @profiles: bitmap of profiles that will be assocated.
/* Builds the AQ recipe buffer(s) from rm->rg_list, allocates recipe IDs
 * (plus a chaining root recipe when more than one group is needed),
 * programs them via the add-recipe AQ command, and mirrors the result
 * into the SW recp_list bookkeeping.
 */
5297 static enum ice_status
5298 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5299 bool match_tun, ice_bitmap_t *profiles)
5301 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5302 struct ice_aqc_recipe_data_elem *tmp;
5303 struct ice_aqc_recipe_data_elem *buf;
5304 struct ice_recp_grp_entry *entry;
5305 enum ice_status status;
5311 /* When more than one recipe are required, another recipe is needed to
5312 * chain them together. Matching a tunnel metadata ID takes up one of
5313 * the match fields in the chaining recipe reducing the number of
5314 * chained recipes by one.
5316 /* check number of free result indices */
5317 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5318 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5320 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5321 free_res_idx, rm->n_grp_count);
5323 if (rm->n_grp_count > 1) {
5324 if (rm->n_grp_count > free_res_idx)
5325 return ICE_ERR_MAX_LIMIT;
/* tmp receives the current FW recipe table; buf is what we program */
5330 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5331 ICE_MAX_NUM_RECIPES,
5334 return ICE_ERR_NO_MEMORY;
5336 buf = (struct ice_aqc_recipe_data_elem *)
5337 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5339 status = ICE_ERR_NO_MEMORY;
5343 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5344 recipe_count = ICE_MAX_NUM_RECIPES;
5345 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5347 if (status || recipe_count == 0)
5350 /* Allocate the recipe resources, and configure them according to the
5351 * match fields from protocol headers and extracted field vectors.
5353 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5354 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5357 status = ice_alloc_recipe(hw, &entry->rid);
5361 /* Clear the result index of the located recipe, as this will be
5362 * updated, if needed, later in the recipe creation process.
5364 tmp[0].content.result_indx = 0;
5366 buf[recps] = tmp[0];
5367 buf[recps].recipe_indx = (u8)entry->rid;
5368 /* if the recipe is a non-root recipe RID should be programmed
5369 * as 0 for the rules to be applied correctly.
5371 buf[recps].content.rid = 0;
5372 ice_memset(&buf[recps].content.lkup_indx, 0,
5373 sizeof(buf[recps].content.lkup_indx),
5376 /* All recipes use look-up index 0 to match switch ID. */
5377 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5378 buf[recps].content.mask[0] =
5379 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5380 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5383 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5384 buf[recps].content.lkup_indx[i] = 0x80;
5385 buf[recps].content.mask[i] = 0;
/* Program this group's real field-vector indices and masks */
5388 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5389 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5390 buf[recps].content.mask[i + 1] =
5391 CPU_TO_LE16(entry->fv_mask[i]);
5394 if (rm->n_grp_count > 1) {
5395 /* Checks to see if there really is a valid result index
5398 if (chain_idx >= ICE_MAX_FV_WORDS) {
5399 ice_debug(hw, ICE_DBG_SW,
5400 "No chain index available\n");
5401 status = ICE_ERR_MAX_LIMIT;
/* Reserve this result index for chaining into the root recipe */
5405 entry->chain_idx = chain_idx;
5406 buf[recps].content.result_indx =
5407 ICE_AQ_RECIPE_RESULT_EN |
5408 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5409 ICE_AQ_RECIPE_RESULT_DATA_M);
5410 ice_clear_bit(chain_idx, result_idx_bm);
5411 chain_idx = ice_find_first_bit(result_idx_bm,
5415 /* fill recipe dependencies */
5416 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5417 ICE_MAX_NUM_RECIPES);
5418 ice_set_bit(buf[recps].recipe_indx,
5419 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5420 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the one recipe is itself the root */
5424 if (rm->n_grp_count == 1) {
5425 rm->root_rid = buf[0].recipe_indx;
5426 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5427 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5428 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5429 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5430 sizeof(buf[0].recipe_bitmap),
5431 ICE_NONDMA_TO_NONDMA);
5433 status = ICE_ERR_BAD_PTR;
5436 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5437 * the recipe which is getting created if specified
5438 * by user. Usually any advanced switch filter, which results
5439 * into new extraction sequence, ended up creating a new recipe
5440 * of type ROOT and usually recipes are associated with profiles
5441 * Switch rule referreing newly created recipe, needs to have
5442 * either/or 'fwd' or 'join' priority, otherwise switch rule
5443 * evaluation will not happen correctly. In other words, if
5444 * switch rule to be evaluated on priority basis, then recipe
5445 * needs to have priority, otherwise it will be evaluated last.
5447 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5449 struct ice_recp_grp_entry *last_chain_entry;
5452 /* Allocate the last recipe that will chain the outcomes of the
5453 * other recipes together
5455 status = ice_alloc_recipe(hw, &rid);
5459 buf[recps].recipe_indx = (u8)rid;
5460 buf[recps].content.rid = (u8)rid;
5461 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5462 /* the new entry created should also be part of rg_list to
5463 * make sure we have complete recipe
5465 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5466 sizeof(*last_chain_entry));
5467 if (!last_chain_entry) {
5468 status = ICE_ERR_NO_MEMORY;
5471 last_chain_entry->rid = rid;
5472 ice_memset(&buf[recps].content.lkup_indx, 0,
5473 sizeof(buf[recps].content.lkup_indx),
5475 /* All recipes use look-up index 0 to match switch ID. */
5476 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5477 buf[recps].content.mask[0] =
5478 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5479 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5480 buf[recps].content.lkup_indx[i] =
5481 ICE_AQ_RECIPE_LKUP_IGNORE;
5482 buf[recps].content.mask[i] = 0;
5486 /* update r_bitmap with the recp that is used for chaining */
5487 ice_set_bit(rid, rm->r_bitmap);
5488 /* this is the recipe that chains all the other recipes so it
5489 * should not have a chaining ID to indicate the same
5491 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Root recipe matches each sub-recipe's result word (chain_idx) */
5492 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5494 last_chain_entry->fv_idx[i] = entry->chain_idx;
5495 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5496 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5497 ice_set_bit(entry->rid, rm->r_bitmap);
5499 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5500 if (sizeof(buf[recps].recipe_bitmap) >=
5501 sizeof(rm->r_bitmap)) {
5502 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5503 sizeof(buf[recps].recipe_bitmap),
5504 ICE_NONDMA_TO_NONDMA);
5506 status = ICE_ERR_BAD_PTR;
5509 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5511 /* To differentiate among different UDP tunnels, a meta data ID
/* Tunnel match consumes one more lookup word in the root recipe */
5515 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5516 buf[recps].content.mask[i] =
5517 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5521 rm->root_rid = (u8)rid;
/* Program the assembled recipes under the change lock */
5523 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5527 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5528 ice_release_change_lock(hw);
5532 /* Every recipe that just got created add it to the recipe
5535 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5536 struct ice_switch_info *sw = hw->switch_info;
5537 bool is_root, idx_found = false;
5538 struct ice_sw_recipe *recp;
5539 u16 idx, buf_idx = 0;
5541 /* find buffer index for copying some data */
5542 for (idx = 0; idx < rm->n_grp_count; idx++)
5543 if (buf[idx].recipe_indx == entry->rid) {
5549 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5553 recp = &sw->recp_list[entry->rid];
5554 is_root = (rm->root_rid == entry->rid);
5555 recp->is_root = is_root;
5557 recp->root_rid = entry->rid;
5558 recp->big_recp = (is_root && rm->n_grp_count > 1);
5560 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5561 entry->r_group.n_val_pairs *
5562 sizeof(struct ice_fv_word),
5563 ICE_NONDMA_TO_NONDMA);
5565 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5566 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5568 /* Copy non-result fv index values and masks to recipe. This
5569 * call will also update the result recipe bitmask.
5571 ice_collect_result_idx(&buf[buf_idx], recp);
5573 /* for non-root recipes, also copy to the root, this allows
5574 * easier matching of a complete chained recipe
5577 ice_collect_result_idx(&buf[buf_idx],
5578 &sw->recp_list[rm->root_rid]);
5580 recp->n_ext_words = entry->r_group.n_val_pairs;
5581 recp->chain_idx = entry->chain_idx;
5582 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5583 recp->n_grp_count = rm->n_grp_count;
5584 recp->tun_type = rm->tun_type;
5585 recp->recp_created = true;
5600 * ice_create_recipe_group - creates recipe group
5601 * @hw: pointer to hardware structure
5602 * @rm: recipe management list entry
5603 * @lkup_exts: lookup elements
/* Partitions the lookup words into recipe groups (first-fit) and copies
 * the extraction words/masks into the recipe management entry.
 */
5605 static enum ice_status
5606 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5607 struct ice_prot_lkup_ext *lkup_exts)
5609 enum ice_status status;
5612 rm->n_grp_count = 0;
5614 /* Create recipes for words that are marked not done by packing them
5617 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5618 &rm->rg_list, &recp_count);
5620 rm->n_grp_count += recp_count;
5621 rm->n_ext_words = lkup_exts->n_val_words;
5622 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5623 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5624 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5625 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5632 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5633 * @hw: pointer to hardware structure
5634 * @lkups: lookup elements or match criteria for the advanced recipe, one
5635 * structure per protocol header
5636 * @lkups_cnt: number of protocols
5637 * @bm: bitmap of field vectors to consider
5638 * @fv_list: pointer to a list that holds the returned field vectors
5640 static enum ice_status
5641 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5642 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5644 enum ice_status status;
5648 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5650 return ICE_ERR_NO_MEMORY;
5652 for (i = 0; i < lkups_cnt; i++)
5653 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5654 status = ICE_ERR_CFG;
5658 /* Find field vectors that include all specified protocol types */
5659 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5662 ice_free(hw, prot_ids);
5667 * ice_add_special_words - Add words that are not protocols, such as metadata
5668 * @rinfo: other information regarding the rule e.g. priority and action info
5669 * @lkup_exts: lookup word structure
5671 static enum ice_status
5672 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5673 struct ice_prot_lkup_ext *lkup_exts)
5675 /* If this is a tunneled packet, then add recipe index to match the
5676 * tunnel bit in the packet metadata flags.
5678 if (rinfo->tun_type != ICE_NON_TUN) {
5679 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5680 u8 word = lkup_exts->n_val_words++;
5682 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5683 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5685 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5687 return ICE_ERR_MAX_LIMIT;
5694 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5695 * @hw: pointer to hardware structure
5696 * @rinfo: other information regarding the rule e.g. priority and action info
5697 * @bm: pointer to memory for returning the bitmap of field vectors
5700 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5703 enum ice_prof_type prof_type;
5705 switch (rinfo->tun_type) {
5707 prof_type = ICE_PROF_NON_TUN;
5709 case ICE_ALL_TUNNELS:
5710 prof_type = ICE_PROF_TUN_ALL;
5712 case ICE_SW_TUN_VXLAN_GPE:
5713 case ICE_SW_TUN_GENEVE:
5714 case ICE_SW_TUN_VXLAN:
5715 case ICE_SW_TUN_UDP:
5716 case ICE_SW_TUN_GTP:
5717 prof_type = ICE_PROF_TUN_UDP;
5719 case ICE_SW_TUN_NVGRE:
5720 prof_type = ICE_PROF_TUN_GRE;
5722 case ICE_SW_TUN_PPPOE:
5723 prof_type = ICE_PROF_TUN_PPPOE;
5725 case ICE_SW_TUN_AND_NON_TUN:
5727 prof_type = ICE_PROF_ALL;
5731 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5735 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5736 * @hw: pointer to hardware structure
5737 * @lkups: lookup elements or match criteria for the advanced recipe, one
5738 * structure per protocol header
5739 * @lkups_cnt: number of protocols
5740 * @rinfo: other information regarding the rule e.g. priority and action info
5741 * @rid: return the recipe ID of the recipe created
5743 static enum ice_status
5744 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5745 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5747 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5748 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5749 struct ice_prot_lkup_ext *lkup_exts;
5750 struct ice_recp_grp_entry *r_entry;
5751 struct ice_sw_fv_list_entry *fvit;
5752 struct ice_recp_grp_entry *r_tmp;
5753 struct ice_sw_fv_list_entry *tmp;
5754 enum ice_status status = ICE_SUCCESS;
5755 struct ice_sw_recipe *rm;
5756 bool match_tun = false;
5760 return ICE_ERR_PARAM;
5762 lkup_exts = (struct ice_prot_lkup_ext *)
5763 ice_malloc(hw, sizeof(*lkup_exts));
5765 return ICE_ERR_NO_MEMORY;
5767 /* Determine the number of words to be matched and if it exceeds a
5768 * recipe's restrictions
5770 for (i = 0; i < lkups_cnt; i++) {
5773 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5774 status = ICE_ERR_CFG;
5775 goto err_free_lkup_exts;
5778 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5780 status = ICE_ERR_CFG;
5781 goto err_free_lkup_exts;
5785 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5787 status = ICE_ERR_NO_MEMORY;
5788 goto err_free_lkup_exts;
5791 /* Get field vectors that contain fields extracted from all the protocol
5792 * headers being programmed.
5794 INIT_LIST_HEAD(&rm->fv_list);
5795 INIT_LIST_HEAD(&rm->rg_list);
5797 /* Get bitmap of field vectors (profiles) that are compatible with the
5798 * rule request; only these will be searched in the subsequent call to
5801 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5803 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5807 /* Group match words into recipes using preferred recipe grouping
5810 status = ice_create_recipe_group(hw, rm, lkup_exts);
5814 /* There is only profile for UDP tunnels. So, it is necessary to use a
5815 * metadata ID flag to differentiate different tunnel types. A separate
5816 * recipe needs to be used for the metadata.
5818 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5819 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5820 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5823 /* set the recipe priority if specified */
5824 rm->priority = (u8)rinfo->priority;
5826 /* Find offsets from the field vector. Pick the first one for all the
5829 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5833 /* get bitmap of all profiles the recipe will be associated with */
5834 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5835 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5837 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5838 ice_set_bit((u16)fvit->profile_id, profiles);
5841 /* Create any special protocol/offset pairs, such as looking at tunnel
5842 * bits by extracting metadata
5844 status = ice_add_special_words(rinfo, lkup_exts);
5846 goto err_free_lkup_exts;
5848 /* Look for a recipe which matches our requested fv / mask list */
5849 *rid = ice_find_recp(hw, lkup_exts);
5850 if (*rid < ICE_MAX_NUM_RECIPES)
5851 /* Success if found a recipe that match the existing criteria */
5854 /* Recipe we need does not exist, add a recipe */
5855 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5859 /* Associate all the recipes created with all the profiles in the
5860 * common field vector.
5862 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5864 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5867 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5868 (u8 *)r_bitmap, NULL);
5872 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5873 ICE_MAX_NUM_RECIPES);
5874 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5878 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5881 ice_release_change_lock(hw);
5886 /* Update profile to recipe bitmap array */
5887 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5888 ICE_MAX_NUM_RECIPES);
5890 /* Update recipe to profile bitmap array */
5891 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5892 if (ice_is_bit_set(r_bitmap, j))
5893 ice_set_bit((u16)fvit->profile_id,
5894 recipe_to_profile[j]);
5897 *rid = rm->root_rid;
5898 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5899 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
5901 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5902 ice_recp_grp_entry, l_entry) {
5903 LIST_DEL(&r_entry->l_entry);
5904 ice_free(hw, r_entry);
5907 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5909 LIST_DEL(&fvit->list_entry);
5914 ice_free(hw, rm->root_buf);
5919 ice_free(hw, lkup_exts);
5925 * ice_find_dummy_packet - find dummy packet by tunnel type
5927 * @lkups: lookup elements or match criteria for the advanced recipe, one
5928 * structure per protocol header
5929 * @lkups_cnt: number of protocols
5930 * @tun_type: tunnel type from the match criteria
5931 * @pkt: dummy packet to fill according to filter match criteria
5932 * @pkt_len: packet length of dummy packet
5933 * @offsets: pointer to receive the pointer to the offsets for the packet
5936 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5937 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5939 const struct ice_dummy_pkt_offsets **offsets)
5941 bool tcp = false, udp = false, ipv6 = false, vlan = false;
5945 for (i = 0; i < lkups_cnt; i++) {
5946 if (lkups[i].type == ICE_UDP_ILOS)
5948 else if (lkups[i].type == ICE_TCP_IL)
5950 else if (lkups[i].type == ICE_IPV6_OFOS)
5952 else if (lkups[i].type == ICE_VLAN_OFOS)
5954 else if (lkups[i].type == ICE_IPV4_OFOS &&
5955 lkups[i].h_u.ipv4_hdr.protocol ==
5956 ICE_IPV4_NVGRE_PROTO_ID &&
5957 lkups[i].m_u.ipv4_hdr.protocol ==
5960 else if (lkups[i].type == ICE_PPPOE &&
5961 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5962 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
5963 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
5966 else if (lkups[i].type == ICE_ETYPE_OL &&
5967 lkups[i].h_u.ethertype.ethtype_id ==
5968 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
5969 lkups[i].m_u.ethertype.ethtype_id ==
5974 if (tun_type == ICE_SW_TUN_GTP) {
5975 *pkt = dummy_udp_gtp_packet;
5976 *pkt_len = sizeof(dummy_udp_gtp_packet);
5977 *offsets = dummy_udp_gtp_packet_offsets;
5980 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
5981 *pkt = dummy_pppoe_ipv6_packet;
5982 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
5983 *offsets = dummy_pppoe_packet_offsets;
5985 } else if (tun_type == ICE_SW_TUN_PPPOE) {
5986 *pkt = dummy_pppoe_ipv4_packet;
5987 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
5988 *offsets = dummy_pppoe_packet_offsets;
5992 if (tun_type == ICE_ALL_TUNNELS) {
5993 *pkt = dummy_gre_udp_packet;
5994 *pkt_len = sizeof(dummy_gre_udp_packet);
5995 *offsets = dummy_gre_udp_packet_offsets;
5999 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6001 *pkt = dummy_gre_tcp_packet;
6002 *pkt_len = sizeof(dummy_gre_tcp_packet);
6003 *offsets = dummy_gre_tcp_packet_offsets;
6007 *pkt = dummy_gre_udp_packet;
6008 *pkt_len = sizeof(dummy_gre_udp_packet);
6009 *offsets = dummy_gre_udp_packet_offsets;
6013 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6014 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6016 *pkt = dummy_udp_tun_tcp_packet;
6017 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6018 *offsets = dummy_udp_tun_tcp_packet_offsets;
6022 *pkt = dummy_udp_tun_udp_packet;
6023 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6024 *offsets = dummy_udp_tun_udp_packet_offsets;
6030 *pkt = dummy_vlan_udp_packet;
6031 *pkt_len = sizeof(dummy_vlan_udp_packet);
6032 *offsets = dummy_vlan_udp_packet_offsets;
6035 *pkt = dummy_udp_packet;
6036 *pkt_len = sizeof(dummy_udp_packet);
6037 *offsets = dummy_udp_packet_offsets;
6039 } else if (udp && ipv6) {
6041 *pkt = dummy_vlan_udp_ipv6_packet;
6042 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6043 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6046 *pkt = dummy_udp_ipv6_packet;
6047 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6048 *offsets = dummy_udp_ipv6_packet_offsets;
6050 } else if ((tcp && ipv6) || ipv6) {
6052 *pkt = dummy_vlan_tcp_ipv6_packet;
6053 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6054 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6057 *pkt = dummy_tcp_ipv6_packet;
6058 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6059 *offsets = dummy_tcp_ipv6_packet_offsets;
6064 *pkt = dummy_vlan_tcp_packet;
6065 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6066 *offsets = dummy_vlan_tcp_packet_offsets;
6068 *pkt = dummy_tcp_packet;
6069 *pkt_len = sizeof(dummy_tcp_packet);
6070 *offsets = dummy_tcp_packet_offsets;
6075 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6077 * @lkups: lookup elements or match criteria for the advanced recipe, one
6078 * structure per protocol header
6079 * @lkups_cnt: number of protocols
6080 * @s_rule: stores rule information from the match criteria
6081 * @dummy_pkt: dummy packet to fill according to filter match criteria
6082 * @pkt_len: packet length of dummy packet
6083 * @offsets: offset info for the dummy packet
6085 static enum ice_status
6086 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6087 struct ice_aqc_sw_rules_elem *s_rule,
6088 const u8 *dummy_pkt, u16 pkt_len,
6089 const struct ice_dummy_pkt_offsets *offsets)
6094 /* Start with a packet with a pre-defined/dummy content. Then, fill
6095 * in the header values to be looked up or matched.
6097 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6099 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6101 for (i = 0; i < lkups_cnt; i++) {
6102 enum ice_protocol_type type;
6103 u16 offset = 0, len = 0, j;
6106 /* find the start of this layer; it should be found since this
6107 * was already checked when search for the dummy packet
6109 type = lkups[i].type;
6110 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6111 if (type == offsets[j].type) {
6112 offset = offsets[j].offset;
6117 /* this should never happen in a correct calling sequence */
6119 return ICE_ERR_PARAM;
6121 switch (lkups[i].type) {
6124 len = sizeof(struct ice_ether_hdr);
6127 len = sizeof(struct ice_ethtype_hdr);
6130 len = sizeof(struct ice_vlan_hdr);
6134 len = sizeof(struct ice_ipv4_hdr);
6138 len = sizeof(struct ice_ipv6_hdr);
6143 len = sizeof(struct ice_l4_hdr);
6146 len = sizeof(struct ice_sctp_hdr);
6149 len = sizeof(struct ice_nvgre);
6154 len = sizeof(struct ice_udp_tnl_hdr);
6158 len = sizeof(struct ice_udp_gtp_hdr);
6161 len = sizeof(struct ice_pppoe_hdr);
6164 return ICE_ERR_PARAM;
6167 /* the length should be a word multiple */
6168 if (len % ICE_BYTES_PER_WORD)
6171 /* We have the offset to the header start, the length, the
6172 * caller's header values and mask. Use this information to
6173 * copy the data into the dummy packet appropriately based on
6174 * the mask. Note that we need to only write the bits as
6175 * indicated by the mask to make sure we don't improperly write
6176 * over any significant packet data.
6178 for (j = 0; j < len / sizeof(u16); j++)
6179 if (((u16 *)&lkups[i].m_u)[j])
6180 ((u16 *)(pkt + offset))[j] =
6181 (((u16 *)(pkt + offset))[j] &
6182 ~((u16 *)&lkups[i].m_u)[j]) |
6183 (((u16 *)&lkups[i].h_u)[j] &
6184 ((u16 *)&lkups[i].m_u)[j]);
6187 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6193 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6194 * @hw: pointer to the hardware structure
6195 * @tun_type: tunnel type
6196 * @pkt: dummy packet to fill in
6197 * @offsets: offset info for the dummy packet
6199 static enum ice_status
6200 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6201 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6206 case ICE_SW_TUN_AND_NON_TUN:
6207 case ICE_SW_TUN_VXLAN_GPE:
6208 case ICE_SW_TUN_VXLAN:
6209 case ICE_SW_TUN_UDP:
6210 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6214 case ICE_SW_TUN_GENEVE:
6215 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6220 /* Nothing needs to be done for this tunnel type */
6224 /* Find the outer UDP protocol header and insert the port number */
6225 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6226 if (offsets[i].type == ICE_UDP_OF) {
6227 struct ice_l4_hdr *hdr;
6230 offset = offsets[i].offset;
6231 hdr = (struct ice_l4_hdr *)&pkt[offset];
6232 hdr->dst_port = CPU_TO_BE16(open_port);
6242 * ice_find_adv_rule_entry - Search a rule entry
6243 * @hw: pointer to the hardware structure
6244 * @lkups: lookup elements or match criteria for the advanced recipe, one
6245 * structure per protocol header
6246 * @lkups_cnt: number of protocols
6247 * @recp_id: recipe ID for which we are finding the rule
6248 * @rinfo: other information regarding the rule e.g. priority and action info
6250 * Helper function to search for a given advance rule entry
6251 * Returns pointer to entry storing the rule if found
6253 static struct ice_adv_fltr_mgmt_list_entry *
6254 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6255 u16 lkups_cnt, u16 recp_id,
6256 struct ice_adv_rule_info *rinfo)
6258 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6259 struct ice_switch_info *sw = hw->switch_info;
6262 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6263 ice_adv_fltr_mgmt_list_entry, list_entry) {
6264 bool lkups_matched = true;
6266 if (lkups_cnt != list_itr->lkups_cnt)
6268 for (i = 0; i < list_itr->lkups_cnt; i++)
6269 if (memcmp(&list_itr->lkups[i], &lkups[i],
6271 lkups_matched = false;
6274 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6275 rinfo->tun_type == list_itr->rule_info.tun_type &&
6283 * ice_adv_add_update_vsi_list
6284 * @hw: pointer to the hardware structure
6285 * @m_entry: pointer to current adv filter management list entry
6286 * @cur_fltr: filter information from the book keeping entry
6287 * @new_fltr: filter information with the new VSI to be added
6289 * Call AQ command to add or update previously created VSI list with new VSI.
6291 * Helper function to do book keeping associated with adding filter information
6292 * The algorithm to do the booking keeping is described below :
6293 * When a VSI needs to subscribe to a given advanced filter
6294 * if only one VSI has been added till now
6295 * Allocate a new VSI list and add two VSIs
6296 * to this list using switch rule command
6297 * Update the previously created switch rule with the
6298 * newly created VSI list ID
6299 * if a VSI list was previously created
6300 * Add the new VSI to the previously created VSI list set
6301 * using the update switch rule command
6303 static enum ice_status
6304 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6305 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6306 struct ice_adv_rule_info *cur_fltr,
6307 struct ice_adv_rule_info *new_fltr)
6309 enum ice_status status;
6310 u16 vsi_list_id = 0;
6312 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6313 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6314 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6315 return ICE_ERR_NOT_IMPL;
6317 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6318 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6319 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6320 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6321 return ICE_ERR_NOT_IMPL;
6323 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6324 /* Only one entry existed in the mapping and it was not already
6325 * a part of a VSI list. So, create a VSI list with the old and
6328 struct ice_fltr_info tmp_fltr;
6329 u16 vsi_handle_arr[2];
6331 /* A rule already exists with the new VSI being added */
6332 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6333 new_fltr->sw_act.fwd_id.hw_vsi_id)
6334 return ICE_ERR_ALREADY_EXISTS;
6336 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6337 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6338 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6344 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6345 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6346 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6347 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6348 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6350 /* Update the previous switch rule of "forward to VSI" to
6353 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6357 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6358 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6359 m_entry->vsi_list_info =
6360 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6363 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6365 if (!m_entry->vsi_list_info)
6368 /* A rule already exists with the new VSI being added */
6369 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6372 /* Update the previously created VSI list set with
6373 * the new VSI ID passed in
6375 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6377 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6379 ice_aqc_opc_update_sw_rules,
6381 /* update VSI list mapping info with new VSI ID */
6383 ice_set_bit(vsi_handle,
6384 m_entry->vsi_list_info->vsi_map);
6387 m_entry->vsi_count++;
6392 * ice_add_adv_rule - helper function to create an advanced switch rule
6393 * @hw: pointer to the hardware structure
6394 * @lkups: information on the words that needs to be looked up. All words
6395 * together makes one recipe
6396 * @lkups_cnt: num of entries in the lkups array
6397 * @rinfo: other information related to the rule that needs to be programmed
6398 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6399 * ignored is case of error.
6401 * This function can program only 1 rule at a time. The lkups is used to
6402 * describe the all the words that forms the "lookup" portion of the recipe.
6403 * These words can span multiple protocols. Callers to this function need to
6404 * pass in a list of protocol headers with lookup information along and mask
6405 * that determines which words are valid from the given protocol header.
6406 * rinfo describes other information related to this rule such as forwarding
6407 * IDs, priority of this rule, etc.
6410 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6411 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6412 struct ice_rule_query_data *added_entry)
6414 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6415 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6416 const struct ice_dummy_pkt_offsets *pkt_offsets;
6417 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6418 struct LIST_HEAD_TYPE *rule_head;
6419 struct ice_switch_info *sw;
6420 enum ice_status status;
6421 const u8 *pkt = NULL;
6426 /* Initialize profile to result index bitmap */
6427 if (!hw->switch_info->prof_res_bm_init) {
6428 hw->switch_info->prof_res_bm_init = 1;
6429 ice_init_prof_result_bm(hw);
6433 return ICE_ERR_PARAM;
6435 /* get # of words we need to match */
6437 for (i = 0; i < lkups_cnt; i++) {
6440 ptr = (u16 *)&lkups[i].m_u;
6441 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6445 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6446 return ICE_ERR_PARAM;
6448 /* make sure that we can locate a dummy packet */
6449 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6452 status = ICE_ERR_PARAM;
6453 goto err_ice_add_adv_rule;
6456 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6457 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6458 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6459 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6462 vsi_handle = rinfo->sw_act.vsi_handle;
6463 if (!ice_is_vsi_valid(hw, vsi_handle))
6464 return ICE_ERR_PARAM;
6466 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6467 rinfo->sw_act.fwd_id.hw_vsi_id =
6468 ice_get_hw_vsi_num(hw, vsi_handle);
6469 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6470 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6472 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6475 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6477 /* we have to add VSI to VSI_LIST and increment vsi_count.
6478 * Also Update VSI list so that we can change forwarding rule
6479 * if the rule already exists, we will check if it exists with
6480 * same vsi_id, if not then add it to the VSI list if it already
6481 * exists if not then create a VSI list and add the existing VSI
6482 * ID and the new VSI ID to the list
6483 * We will add that VSI to the list
6485 status = ice_adv_add_update_vsi_list(hw, m_entry,
6486 &m_entry->rule_info,
6489 added_entry->rid = rid;
6490 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6491 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6495 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6496 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6498 return ICE_ERR_NO_MEMORY;
6499 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6500 switch (rinfo->sw_act.fltr_act) {
6501 case ICE_FWD_TO_VSI:
6502 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6503 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6504 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6507 act |= ICE_SINGLE_ACT_TO_Q;
6508 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6509 ICE_SINGLE_ACT_Q_INDEX_M;
6511 case ICE_FWD_TO_QGRP:
6512 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6513 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6514 act |= ICE_SINGLE_ACT_TO_Q;
6515 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6516 ICE_SINGLE_ACT_Q_INDEX_M;
6517 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6518 ICE_SINGLE_ACT_Q_REGION_M;
6520 case ICE_DROP_PACKET:
6521 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6522 ICE_SINGLE_ACT_VALID_BIT;
6525 status = ICE_ERR_CFG;
6526 goto err_ice_add_adv_rule;
6529 /* set the rule LOOKUP type based on caller specified 'RX'
6530 * instead of hardcoding it to be either LOOKUP_TX/RX
6532 * for 'RX' set the source to be the port number
6533 * for 'TX' set the source to be the source HW VSI number (determined
6537 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6538 s_rule->pdata.lkup_tx_rx.src =
6539 CPU_TO_LE16(hw->port_info->lport);
6541 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6542 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6545 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6546 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6548 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6549 pkt_len, pkt_offsets);
6551 goto err_ice_add_adv_rule;
6553 if (rinfo->tun_type != ICE_NON_TUN &&
6554 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6555 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6556 s_rule->pdata.lkup_tx_rx.hdr,
6559 goto err_ice_add_adv_rule;
6562 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6563 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6566 goto err_ice_add_adv_rule;
6567 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6568 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6570 status = ICE_ERR_NO_MEMORY;
6571 goto err_ice_add_adv_rule;
6574 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6575 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6576 ICE_NONDMA_TO_NONDMA);
6577 if (!adv_fltr->lkups) {
6578 status = ICE_ERR_NO_MEMORY;
6579 goto err_ice_add_adv_rule;
6582 adv_fltr->lkups_cnt = lkups_cnt;
6583 adv_fltr->rule_info = *rinfo;
6584 adv_fltr->rule_info.fltr_rule_id =
6585 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6586 sw = hw->switch_info;
6587 sw->recp_list[rid].adv_rule = true;
6588 rule_head = &sw->recp_list[rid].filt_rules;
6590 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6591 struct ice_fltr_info tmp_fltr;
6593 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6594 tmp_fltr.fltr_rule_id =
6595 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6596 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6597 tmp_fltr.fwd_id.hw_vsi_id =
6598 ice_get_hw_vsi_num(hw, vsi_handle);
6599 tmp_fltr.vsi_handle = vsi_handle;
6600 /* Update the previous switch rule of "forward to VSI" to
6603 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6605 goto err_ice_add_adv_rule;
6606 adv_fltr->vsi_count = 1;
6609 /* Add rule entry to book keeping list */
6610 LIST_ADD(&adv_fltr->list_entry, rule_head);
6612 added_entry->rid = rid;
6613 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6614 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6616 err_ice_add_adv_rule:
6617 if (status && adv_fltr) {
6618 ice_free(hw, adv_fltr->lkups);
6619 ice_free(hw, adv_fltr);
6622 ice_free(hw, s_rule);
6628 * ice_adv_rem_update_vsi_list
6629 * @hw: pointer to the hardware structure
6630 * @vsi_handle: VSI handle of the VSI to remove
6631 * @fm_list: filter management entry for which the VSI list management needs to
6634 static enum ice_status
6635 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6636 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6638 struct ice_vsi_list_map_info *vsi_list_info;
6639 enum ice_sw_lkup_type lkup_type;
6640 enum ice_status status;
6643 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6644 fm_list->vsi_count == 0)
6645 return ICE_ERR_PARAM;
6647 /* A rule with the VSI being removed does not exist */
6648 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6649 return ICE_ERR_DOES_NOT_EXIST;
6651 lkup_type = ICE_SW_LKUP_LAST;
6652 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6653 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6654 ice_aqc_opc_update_sw_rules,
6659 fm_list->vsi_count--;
6660 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6661 vsi_list_info = fm_list->vsi_list_info;
6662 if (fm_list->vsi_count == 1) {
6663 struct ice_fltr_info tmp_fltr;
6666 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6668 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6669 return ICE_ERR_OUT_OF_RANGE;
6671 /* Make sure VSI list is empty before removing it below */
6672 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6674 ice_aqc_opc_update_sw_rules,
6679 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6680 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6681 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6682 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6683 tmp_fltr.fwd_id.hw_vsi_id =
6684 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6685 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6686 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6688 /* Update the previous switch rule of "MAC forward to VSI" to
6689 * "MAC fwd to VSI list"
6691 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6693 ice_debug(hw, ICE_DBG_SW,
6694 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6695 tmp_fltr.fwd_id.hw_vsi_id, status);
6699 /* Remove the VSI list since it is no longer used */
6700 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6702 ice_debug(hw, ICE_DBG_SW,
6703 "Failed to remove VSI list %d, error %d\n",
6704 vsi_list_id, status);
6708 LIST_DEL(&vsi_list_info->list_entry);
6709 ice_free(hw, vsi_list_info);
6710 fm_list->vsi_list_info = NULL;
6717 * ice_rem_adv_rule - removes existing advanced switch rule
6718 * @hw: pointer to the hardware structure
6719 * @lkups: information on the words that needs to be looked up. All words
6720 * together makes one recipe
6721 * @lkups_cnt: num of entries in the lkups array
6722 * @rinfo: Its the pointer to the rule information for the rule
6724 * This function can be used to remove 1 rule at a time. The lkups is
6725 * used to describe all the words that forms the "lookup" portion of the
6726 * rule. These words can span multiple protocols. Callers to this function
6727 * need to pass in a list of protocol headers with lookup information along
6728 * and mask that determines which words are valid from the given protocol
6729 * header. rinfo describes other information related to this rule such as
6730 * forwarding IDs, priority of this rule, etc.
6733 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6734 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6736 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6737 struct ice_prot_lkup_ext lkup_exts;
6738 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6739 enum ice_status status = ICE_SUCCESS;
6740 bool remove_rule = false;
6741 u16 i, rid, vsi_handle;
/* Re-extract the protocol/offset words of this rule so the recipe that
 * owns it can be looked up by content rather than by ID.
 */
6743 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6744 for (i = 0; i < lkups_cnt; i++) {
/* reject unknown protocol types before extracting words */
6747 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6750 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6755 /* Create any special protocol/offset pairs, such as looking at tunnel
6756 * bits by extracting metadata
6758 status = ice_add_special_words(rinfo, &lkup_exts);
/* Map the extracted words to an existing recipe ID */
6762 rid = ice_find_recp(hw, &lkup_exts);
6763 /* If did not find a recipe that match the existing criteria */
6764 if (rid == ICE_MAX_NUM_RECIPES)
6765 return ICE_ERR_PARAM;
6767 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6768 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6769 /* the rule is already removed */
/* Under the rule lock, decide whether the HW rule itself must be
 * removed or only this VSI's membership in the rule's VSI list.
 */
6772 ice_acquire_lock(rule_lock);
6773 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6775 } else if (list_elem->vsi_count > 1) {
/* Other VSIs still use the list: drop the refcount and detach
 * only the caller's VSI; the switch rule stays in HW.
 */
6776 list_elem->vsi_list_info->ref_cnt--;
6777 remove_rule = false;
6778 vsi_handle = rinfo->sw_act.vsi_handle;
6779 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
/* NOTE(review): branch lines between 6779 and 6781 are not visible in
 * this listing; the path below also updates the VSI list — presumably
 * for the last remaining VSI. Confirm against the full source.
 */
6781 vsi_handle = rinfo->sw_act.vsi_handle;
6782 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6784 ice_release_lock(rule_lock);
6787 if (list_elem->vsi_count == 0)
6790 ice_release_lock(rule_lock);
6792 struct ice_aqc_sw_rules_elem *s_rule;
/* Build a minimal (header-less) rule element carrying only the rule
 * index, which is all the remove opcode needs.
 */
6795 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6797 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6800 return ICE_ERR_NO_MEMORY;
6801 s_rule->pdata.lkup_tx_rx.act = 0;
6802 s_rule->pdata.lkup_tx_rx.index =
6803 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6804 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6805 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6807 ice_aqc_opc_remove_sw_rules, NULL);
/* Unlink and free the bookkeeping entry only after HW confirms the
 * rule removal succeeded.
 */
6808 if (status == ICE_SUCCESS) {
6809 ice_acquire_lock(rule_lock);
6810 LIST_DEL(&list_elem->list_entry);
6811 ice_free(hw, list_elem->lkups);
6812 ice_free(hw, list_elem);
6813 ice_release_lock(rule_lock);
6815 ice_free(hw, s_rule);
6821 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6822 * @hw: pointer to the hardware structure
6823 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6825 * This function is used to remove 1 rule at a time. The removal is based on
6826 * the remove_entry parameter. This function will remove rule for a given
6827 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6830 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6831 struct ice_rule_query_data *remove_entry)
6833 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6834 struct LIST_HEAD_TYPE *list_head;
6835 struct ice_adv_rule_info rinfo;
6836 struct ice_switch_info *sw;
6838 sw = hw->switch_info;
/* The recipe must exist before its filter list can be searched */
6839 if (!sw->recp_list[remove_entry->rid].recp_created)
6840 return ICE_ERR_PARAM;
6841 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6842 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
/* Match on HW rule ID, then delegate to the generic removal path
 * using a copy of the stored rule info retargeted at the caller's VSI.
 */
6844 if (list_itr->rule_info.fltr_rule_id ==
6845 remove_entry->rule_id) {
6846 rinfo = list_itr->rule_info;
6847 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6848 return ice_rem_adv_rule(hw, list_itr->lkups,
6849 list_itr->lkups_cnt, &rinfo);
/* No rule with the requested ID on this recipe's filter list */
6852 return ICE_ERR_PARAM;
6856 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6858 * @hw: pointer to the hardware structure
6859 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6861 * This function is used to remove all the rules for a given VSI and as soon
6862 * as removing a rule fails, it will return immediately with the error code,
6863 * else it will return ICE_SUCCESS
6866 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6868 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6869 struct ice_vsi_list_map_info *map_info;
6870 struct LIST_HEAD_TYPE *list_head;
6871 struct ice_adv_rule_info rinfo;
6872 struct ice_switch_info *sw;
6873 enum ice_status status;
6874 u16 vsi_list_id = 0;
6877 sw = hw->switch_info;
/* Walk every created recipe, considering only those that carry
 * advanced (as opposed to basic lookup) rules.
 */
6878 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6879 if (!sw->recp_list[rid].recp_created)
6881 if (!sw->recp_list[rid].adv_rule)
6883 list_head = &sw->recp_list[rid].filt_rules;
6885 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6886 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Look up this VSI's membership in the rule's VSI list; the
 * skip-on-no-match lines are not visible in this listing.
 */
6887 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
6892 rinfo = list_itr->rule_info;
6893 rinfo.sw_act.vsi_handle = vsi_handle;
/* Remove this VSI's association (or the whole rule if it is the
 * last user); per the contract above, bail out on first failure.
 */
6894 status = ice_rem_adv_rule(hw, list_itr->lkups,
6895 list_itr->lkups_cnt, &rinfo);
6905 * ice_replay_fltr - Replay all the filters stored by a specific list head
6906 * @hw: pointer to the hardware structure
6907 * @list_head: list for which filters needs to be replayed
6908 * @recp_id: Recipe ID for which rules need to be replayed
6910 static enum ice_status
6911 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6913 struct ice_fltr_mgmt_list_entry *itr;
6914 enum ice_status status = ICE_SUCCESS;
6915 struct ice_sw_recipe *recp_list;
6916 u8 lport = hw->port_info->lport;
6917 struct LIST_HEAD_TYPE l_head;
/* Nothing to replay for an empty list */
6919 if (LIST_EMPTY(list_head))
6922 recp_list = &hw->switch_info->recp_list[recp_id];
6923 /* Move entries from the given list_head to a temporary l_head so that
6924 * they can be replayed. Otherwise when trying to re-add the same
6925 * filter, the function will return already exists
6927 LIST_REPLACE_INIT(list_head, &l_head);
6929 /* Mark the given list_head empty by reinitializing it so filters
6930 * could be added again by *handler
6932 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6934 struct ice_fltr_list_entry f_entry;
6936 f_entry.fltr_info = itr->fltr_info;
/* Single-VSI, non-VLAN filters are re-added directly */
6937 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6938 status = ice_add_rule_internal(hw, recp_list, lport,
6940 if (status != ICE_SUCCESS)
6945 /* Add a filter per VSI separately */
/* Iterate the rule's VSI bitmap, re-adding one FWD_TO_VSI rule per
 * member VSI (the surrounding loop header is not visible here).
 */
6950 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6952 if (!ice_is_vsi_valid(hw, vsi_handle))
6955 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6956 f_entry.fltr_info.vsi_handle = vsi_handle;
6957 f_entry.fltr_info.fwd_id.hw_vsi_id =
6958 ice_get_hw_vsi_num(hw, vsi_handle);
6959 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters take the VLAN-specific add path */
6960 if (recp_id == ICE_SW_LKUP_VLAN)
6961 status = ice_add_vlan_internal(hw, recp_list,
6964 status = ice_add_rule_internal(hw, recp_list,
6967 if (status != ICE_SUCCESS)
6972 /* Clear the filter management list */
6973 ice_rem_sw_rule_info(hw, &l_head);
6978 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6979 * @hw: pointer to the hardware structure
6981 * NOTE: This function does not clean up partially added filters on error.
6982 * It is up to caller of the function to issue a reset or fail early.
6984 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6986 struct ice_switch_info *sw = hw->switch_info;
6987 enum ice_status status = ICE_SUCCESS;
/* Replay every recipe's stored filters; stop at the first failure
 * (partial additions are not cleaned up here — see function kdoc).
 */
6990 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6991 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6993 status = ice_replay_fltr(hw, i, head);
6994 if (status != ICE_SUCCESS)
7001 * ice_replay_vsi_fltr - Replay filters for requested VSI
7002 * @hw: pointer to the hardware structure
7003 * @vsi_handle: driver VSI handle
7004 * @recp_id: Recipe ID for which rules need to be replayed
7005 * @list_head: list for which filters need to be replayed
7007 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7008 * It is required to pass valid VSI handle.
7010 static enum ice_status
7011 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7012 struct LIST_HEAD_TYPE *list_head)
7014 struct ice_fltr_mgmt_list_entry *itr;
7015 enum ice_status status = ICE_SUCCESS;
7016 struct ice_sw_recipe *recp_list;
/* Nothing to replay for an empty list */
7019 if (LIST_EMPTY(list_head))
7021 recp_list = &hw->switch_info->recp_list[recp_id];
7022 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7024 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7026 struct ice_fltr_list_entry f_entry;
7028 f_entry.fltr_info = itr->fltr_info;
/* Fast path: a non-VLAN filter owned solely by this VSI is
 * replayed as-is.
 */
7029 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7030 itr->fltr_info.vsi_handle == vsi_handle) {
7031 /* update the src in case it is VSI num */
7032 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7033 f_entry.fltr_info.src = hw_vsi_id;
7034 status = ice_add_rule_internal(hw, recp_list,
7035 hw->port_info->lport,
7037 if (status != ICE_SUCCESS)
/* Shared (VSI-list) filters: replay only if this VSI is a member */
7041 if (!itr->vsi_list_info ||
7042 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7044 /* Clearing it so that the logic can add it back */
7045 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
7046 f_entry.fltr_info.vsi_handle = vsi_handle;
7047 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7048 /* update the src in case it is VSI num */
7049 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7050 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters take the VLAN-specific add path */
7051 if (recp_id == ICE_SW_LKUP_VLAN)
7052 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7054 status = ice_add_rule_internal(hw, recp_list,
7055 hw->port_info->lport,
7057 if (status != ICE_SUCCESS)
7065 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7066 * @hw: pointer to the hardware structure
7067 * @vsi_handle: driver VSI handle
7068 * @list_head: list for which filters need to be replayed
7070 * Replay the advanced rule for the given VSI.
7072 static enum ice_status
7073 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7074 struct LIST_HEAD_TYPE *list_head)
7076 struct ice_rule_query_data added_entry = { 0 };
7077 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7078 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list */
7080 if (LIST_EMPTY(list_head))
7082 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7084 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7085 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Replay only the rules whose action targets the requested VSI */
7087 if (vsi_handle != rinfo->sw_act.vsi_handle)
7089 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7098 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7099 * @hw: pointer to the hardware structure
7100 * @vsi_handle: driver VSI handle
7102 * Replays filters for requested VSI via vsi_handle.
7104 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7106 struct ice_switch_info *sw = hw->switch_info;
7107 enum ice_status status;
7110 /* Update the recipes that were created */
7111 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7112 struct LIST_HEAD_TYPE *head;
7114 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes replay via ice_replay_vsi_fltr; recipes flagged as
 * advanced use the advanced-rule replay path instead.
 */
7115 if (!sw->recp_list[i].adv_rule)
7116 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7118 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Stop on first failure; caller decides how to recover */
7119 if (status != ICE_SUCCESS)
7127 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7128 * @hw: pointer to the HW struct
7130 * Deletes the filter replay rules.
7132 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7134 struct ice_switch_info *sw = hw->switch_info;
7140 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7141 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7142 struct LIST_HEAD_TYPE *l_head;
7144 l_head = &sw->recp_list[i].filt_replay_rules;
7145 if (!sw->recp_list[i].adv_rule)
7146 ice_rem_sw_rule_info(hw, l_head);
7148 ice_rem_adv_rule_info(hw, l_head);