1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
15 #define ICE_IPV6_ETHER_ID 0x86DD
17 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
18 * struct to configure any switch filter rules.
19 * {DA (6 bytes), SA(6 bytes),
20 * Ether type (2 bytes for header without VLAN tag) OR
21 * VLAN tag (4 bytes for header with VLAN tag) }
23 * Word on Hardcoded values
24 * byte 0 = 0x2: to identify it as locally administered DA MAC
25 * byte 6 = 0x2: to identify it as locally administered SA MAC
26 * byte 12 = 0x81 & byte 13 = 0x00:
27 * In case of VLAN filter first two bytes defines ether type (0x8100)
28 * and remaining two bytes are placeholder for programming a given VLAN ID
29 * In case of Ether type filter it is treated as header without VLAN tag
30 * and byte 12 and 13 is used to program a given Ether type instead
32 #define DUMMY_ETH_HDR_LEN 16
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
38 (sizeof(struct ice_aqc_sw_rules_elem) - \
39 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
40 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
41 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
42 (sizeof(struct ice_aqc_sw_rules_elem) - \
43 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
44 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
45 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
46 (sizeof(struct ice_aqc_sw_rules_elem) - \
47 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
48 sizeof(struct ice_sw_rule_lg_act) - \
49 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
50 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
51 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
52 (sizeof(struct ice_aqc_sw_rules_elem) - \
53 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
54 sizeof(struct ice_sw_rule_vsi_list) - \
55 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
56 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
58 struct ice_dummy_pkt_offsets {
59 enum ice_protocol_type type;
60 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
63 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
66 { ICE_IPV4_OFOS, 14 },
71 { ICE_PROTOCOL_LAST, 0 },
74 static const u8 dummy_gre_tcp_packet[] = {
75 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
76 0x00, 0x00, 0x00, 0x00,
77 0x00, 0x00, 0x00, 0x00,
79 0x08, 0x00, /* ICE_ETYPE_OL 12 */
81 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
82 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x2F, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00,
87 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
88 0x00, 0x00, 0x00, 0x00,
90 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
91 0x00, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00,
95 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
96 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x06, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
102 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00,
104 0x50, 0x02, 0x20, 0x00,
105 0x00, 0x00, 0x00, 0x00
108 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
119 static const u8 dummy_gre_udp_packet[] = {
120 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
121 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00,
124 0x08, 0x00, /* ICE_ETYPE_OL 12 */
126 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
127 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x2F, 0x00, 0x00,
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
132 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
133 0x00, 0x00, 0x00, 0x00,
135 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
136 0x00, 0x00, 0x00, 0x00,
137 0x00, 0x00, 0x00, 0x00,
140 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
141 0x00, 0x00, 0x00, 0x00,
142 0x00, 0x11, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
147 0x00, 0x08, 0x00, 0x00,
150 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
152 { ICE_ETYPE_OL, 12 },
153 { ICE_IPV4_OFOS, 14 },
157 { ICE_VXLAN_GPE, 42 },
161 { ICE_PROTOCOL_LAST, 0 },
164 static const u8 dummy_udp_tun_tcp_packet[] = {
165 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
166 0x00, 0x00, 0x00, 0x00,
167 0x00, 0x00, 0x00, 0x00,
169 0x08, 0x00, /* ICE_ETYPE_OL 12 */
171 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
172 0x00, 0x01, 0x00, 0x00,
173 0x40, 0x11, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
175 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
178 0x00, 0x46, 0x00, 0x00,
180 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
181 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
184 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00,
188 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
189 0x00, 0x01, 0x00, 0x00,
190 0x40, 0x06, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00,
192 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
195 0x00, 0x00, 0x00, 0x00,
196 0x00, 0x00, 0x00, 0x00,
197 0x50, 0x02, 0x20, 0x00,
198 0x00, 0x00, 0x00, 0x00
201 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
203 { ICE_ETYPE_OL, 12 },
204 { ICE_IPV4_OFOS, 14 },
208 { ICE_VXLAN_GPE, 42 },
211 { ICE_UDP_ILOS, 84 },
212 { ICE_PROTOCOL_LAST, 0 },
215 static const u8 dummy_udp_tun_udp_packet[] = {
216 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
217 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00,
220 0x08, 0x00, /* ICE_ETYPE_OL 12 */
222 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
223 0x00, 0x01, 0x00, 0x00,
224 0x00, 0x11, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00,
226 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
229 0x00, 0x3a, 0x00, 0x00,
231 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
232 0x00, 0x00, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
235 0x00, 0x00, 0x00, 0x00,
236 0x00, 0x00, 0x00, 0x00,
239 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
240 0x00, 0x01, 0x00, 0x00,
241 0x00, 0x11, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00,
245 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
246 0x00, 0x08, 0x00, 0x00,
249 /* offset info for MAC + IPv4 + UDP dummy packet */
250 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
252 { ICE_ETYPE_OL, 12 },
253 { ICE_IPV4_OFOS, 14 },
254 { ICE_UDP_ILOS, 34 },
255 { ICE_PROTOCOL_LAST, 0 },
258 /* Dummy packet for MAC + IPv4 + UDP */
259 static const u8 dummy_udp_packet[] = {
260 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
261 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00,
264 0x08, 0x00, /* ICE_ETYPE_OL 12 */
266 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
267 0x00, 0x01, 0x00, 0x00,
268 0x00, 0x11, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
273 0x00, 0x08, 0x00, 0x00,
275 0x00, 0x00, /* 2 bytes for 4 byte alignment */
278 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
279 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
281 { ICE_ETYPE_OL, 12 },
282 { ICE_VLAN_OFOS, 14 },
283 { ICE_IPV4_OFOS, 18 },
284 { ICE_UDP_ILOS, 38 },
285 { ICE_PROTOCOL_LAST, 0 },
288 /* C-tag (801.1Q), IPv4:UDP dummy packet */
289 static const u8 dummy_vlan_udp_packet[] = {
290 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
291 0x00, 0x00, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00,
294 0x81, 0x00, /* ICE_ETYPE_OL 12 */
296 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
298 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
299 0x00, 0x01, 0x00, 0x00,
300 0x00, 0x11, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
305 0x00, 0x08, 0x00, 0x00,
307 0x00, 0x00, /* 2 bytes for 4 byte alignment */
310 /* offset info for MAC + IPv4 + TCP dummy packet */
311 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
313 { ICE_ETYPE_OL, 12 },
314 { ICE_IPV4_OFOS, 14 },
316 { ICE_PROTOCOL_LAST, 0 },
319 /* Dummy packet for MAC + IPv4 + TCP */
320 static const u8 dummy_tcp_packet[] = {
321 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
322 0x00, 0x00, 0x00, 0x00,
323 0x00, 0x00, 0x00, 0x00,
325 0x08, 0x00, /* ICE_ETYPE_OL 12 */
327 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
328 0x00, 0x01, 0x00, 0x00,
329 0x00, 0x06, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x50, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x00, /* 2 bytes for 4 byte alignment */
342 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
343 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
345 { ICE_ETYPE_OL, 12 },
346 { ICE_VLAN_OFOS, 14 },
347 { ICE_IPV4_OFOS, 18 },
349 { ICE_PROTOCOL_LAST, 0 },
352 /* C-tag (801.1Q), IPv4:TCP dummy packet */
353 static const u8 dummy_vlan_tcp_packet[] = {
354 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
355 0x00, 0x00, 0x00, 0x00,
356 0x00, 0x00, 0x00, 0x00,
358 0x81, 0x00, /* ICE_ETYPE_OL 12 */
360 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
362 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
363 0x00, 0x01, 0x00, 0x00,
364 0x00, 0x06, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x50, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, /* 2 bytes for 4 byte alignment */
377 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
379 { ICE_ETYPE_OL, 12 },
380 { ICE_IPV6_OFOS, 14 },
382 { ICE_PROTOCOL_LAST, 0 },
385 static const u8 dummy_tcp_ipv6_packet[] = {
386 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
387 0x00, 0x00, 0x00, 0x00,
388 0x00, 0x00, 0x00, 0x00,
390 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
392 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
393 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00,
401 0x00, 0x00, 0x00, 0x00,
403 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x50, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x00, 0x00, /* 2 bytes for 4 byte alignment */
412 /* C-tag (802.1Q): IPv6 + TCP */
413 static const struct ice_dummy_pkt_offsets
414 dummy_vlan_tcp_ipv6_packet_offsets[] = {
416 { ICE_ETYPE_OL, 12 },
417 { ICE_VLAN_OFOS, 14 },
418 { ICE_IPV6_OFOS, 18 },
420 { ICE_PROTOCOL_LAST, 0 },
423 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
424 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
425 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
426 0x00, 0x00, 0x00, 0x00,
427 0x00, 0x00, 0x00, 0x00,
429 0x81, 0x00, /* ICE_ETYPE_OL 12 */
431 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
433 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
434 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
447 0x50, 0x00, 0x00, 0x00,
448 0x00, 0x00, 0x00, 0x00,
450 0x00, 0x00, /* 2 bytes for 4 byte alignment */
454 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
456 { ICE_ETYPE_OL, 12 },
457 { ICE_IPV6_OFOS, 14 },
458 { ICE_UDP_ILOS, 54 },
459 { ICE_PROTOCOL_LAST, 0 },
462 /* IPv6 + UDP dummy packet */
463 static const u8 dummy_udp_ipv6_packet[] = {
464 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
465 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, 0x00, 0x00,
468 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
470 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
471 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00,
479 0x00, 0x00, 0x00, 0x00,
481 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
482 0x00, 0x08, 0x00, 0x00,
484 0x00, 0x00, /* 2 bytes for 4 byte alignment */
487 /* C-tag (802.1Q): IPv6 + UDP */
488 static const struct ice_dummy_pkt_offsets
489 dummy_vlan_udp_ipv6_packet_offsets[] = {
491 { ICE_ETYPE_OL, 12 },
492 { ICE_VLAN_OFOS, 14 },
493 { ICE_IPV6_OFOS, 18 },
494 { ICE_UDP_ILOS, 58 },
495 { ICE_PROTOCOL_LAST, 0 },
498 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
499 static const u8 dummy_vlan_udp_ipv6_packet[] = {
500 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
501 0x00, 0x00, 0x00, 0x00,
502 0x00, 0x00, 0x00, 0x00,
504 0x81, 0x00, /* ICE_ETYPE_OL 12 */
506 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
508 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
509 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
520 0x00, 0x08, 0x00, 0x00,
522 0x00, 0x00, /* 2 bytes for 4 byte alignment */
525 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
527 { ICE_IPV4_OFOS, 14 },
530 { ICE_PROTOCOL_LAST, 0 },
533 static const u8 dummy_udp_gtp_packet[] = {
534 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
535 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x00,
539 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
540 0x00, 0x00, 0x00, 0x00,
541 0x00, 0x11, 0x00, 0x00,
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
545 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
546 0x00, 0x1c, 0x00, 0x00,
548 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x85,
552 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
553 0x00, 0x00, 0x00, 0x00,
556 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
558 { ICE_ETYPE_OL, 12 },
559 { ICE_VLAN_OFOS, 14},
561 { ICE_PROTOCOL_LAST, 0 },
564 static const u8 dummy_pppoe_ipv4_packet[] = {
565 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
566 0x00, 0x00, 0x00, 0x00,
567 0x00, 0x00, 0x00, 0x00,
569 0x81, 0x00, /* ICE_ETYPE_OL 12 */
571 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
573 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
576 0x00, 0x21, /* PPP Link Layer 24 */
578 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
587 static const u8 dummy_pppoe_ipv6_packet[] = {
588 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
592 0x81, 0x00, /* ICE_ETYPE_OL 12 */
594 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
596 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
599 0x00, 0x57, /* PPP Link Layer 24 */
601 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
607 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00,
612 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
615 /* this is a recipe to profile association bitmap */
616 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
617 ICE_MAX_NUM_PROFILES);
619 /* this is a profile to recipe association bitmap */
620 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
621 ICE_MAX_NUM_RECIPES);
623 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
626 * ice_collect_result_idx - copy result index values
627 * @buf: buffer that contains the result index
628 * @recp: the recipe struct to copy data into
630 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
631 struct ice_sw_recipe *recp)
633 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
634 ice_set_bit(buf->content.result_indx &
635 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
639 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
640 * @hw: pointer to hardware structure
641 * @recps: struct that we need to populate
642 * @rid: recipe ID that we are populating
643 * @refresh_required: true if we should get recipe to profile mapping from FW
645 * This function is used to populate all the necessary entries into our
646 * bookkeeping so that we have a current list of all the recipes that are
647 * programmed in the firmware.
649 static enum ice_status
650 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
651 bool *refresh_required)
653 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
654 struct ice_aqc_recipe_data_elem *tmp;
655 u16 num_recps = ICE_MAX_NUM_RECIPES;
656 struct ice_prot_lkup_ext *lkup_exts;
657 enum ice_status status;
661 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
663 /* we need a buffer big enough to accommodate all the recipes */
664 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
665 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
667 return ICE_ERR_NO_MEMORY;
669 tmp[0].recipe_indx = rid;
670 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
671 /* non-zero status meaning recipe doesn't exist */
675 /* Get recipe to profile map so that we can get the fv from lkups that
676 * we read for a recipe from FW. Since we want to minimize the number of
677 * times we make this FW call, just make one call and cache the copy
678 * until a new recipe is added. This operation is only required the
679 * first time to get the changes from FW. Then to search existing
680 * entries we don't need to update the cache again until another recipe
683 if (*refresh_required) {
684 ice_get_recp_to_prof_map(hw);
685 *refresh_required = false;
688 /* Start populating all the entries for recps[rid] based on lkups from
689 * firmware. Note that we are only creating the root recipe in our
692 lkup_exts = &recps[rid].lkup_exts;
694 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
695 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
696 struct ice_recp_grp_entry *rg_entry;
697 u8 i, prof, idx, prot = 0;
701 rg_entry = (struct ice_recp_grp_entry *)
702 ice_malloc(hw, sizeof(*rg_entry));
704 status = ICE_ERR_NO_MEMORY;
708 idx = root_bufs.recipe_indx;
709 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
711 /* Mark all result indices in this chain */
712 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
713 ice_set_bit(root_bufs.content.result_indx &
714 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
716 /* get the first profile that is associated with rid */
717 prof = ice_find_first_bit(recipe_to_profile[idx],
718 ICE_MAX_NUM_PROFILES);
719 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
720 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
722 rg_entry->fv_idx[i] = lkup_indx;
723 rg_entry->fv_mask[i] =
724 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
726 /* If the recipe is a chained recipe then all its
727 * child recipe's result will have a result index.
728 * To fill fv_words we should not use those result
729 * index, we only need the protocol ids and offsets.
730 * We will skip all the fv_idx which stores result
731 * index in them. We also need to skip any fv_idx which
732 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
733 * valid offset value.
735 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
736 rg_entry->fv_idx[i]) ||
737 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
738 rg_entry->fv_idx[i] == 0)
741 ice_find_prot_off(hw, ICE_BLK_SW, prof,
742 rg_entry->fv_idx[i], &prot, &off);
743 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
744 lkup_exts->fv_words[fv_word_idx].off = off;
747 /* populate rg_list with the data from the child entry of this
750 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
752 /* Propagate some data to the recipe database */
753 recps[idx].is_root = !!is_root;
754 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
755 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
756 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
757 recps[idx].chain_idx = root_bufs.content.result_indx &
758 ~ICE_AQ_RECIPE_RESULT_EN;
759 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
761 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
767 /* Only do the following for root recipes entries */
768 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
769 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
770 recps[idx].root_rid = root_bufs.content.rid &
771 ~ICE_AQ_RECIPE_ID_IS_ROOT;
772 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
775 /* Complete initialization of the root recipe entry */
776 lkup_exts->n_val_words = fv_word_idx;
777 recps[rid].big_recp = (num_recps > 1);
778 recps[rid].n_grp_count = (u8)num_recps;
779 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
780 ice_memdup(hw, tmp, recps[rid].n_grp_count *
781 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
782 if (!recps[rid].root_buf)
785 /* Copy result indexes */
786 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
787 recps[rid].recp_created = true;
795 * ice_get_recp_to_prof_map - updates recipe to profile mapping
796 * @hw: pointer to hardware structure
798 * This function is used to populate recipe_to_profile matrix where index to
799 * this array is the recipe ID and the element is the mapping of which profiles
800 * is this recipe mapped to.
803 ice_get_recp_to_prof_map(struct ice_hw *hw)
805 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
808 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
811 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
812 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
813 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
815 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
816 ICE_MAX_NUM_RECIPES);
817 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
818 if (ice_is_bit_set(r_bitmap, j))
819 ice_set_bit(i, recipe_to_profile[j]);
824 * ice_init_def_sw_recp - initialize the recipe book keeping tables
825 * @hw: pointer to the HW struct
826 * @recp_list: pointer to sw recipe list
828 * Allocate memory for the entire recipe table and initialize the structures/
829 * entries corresponding to basic recipes.
832 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
834 struct ice_sw_recipe *recps;
837 recps = (struct ice_sw_recipe *)
838 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
840 return ICE_ERR_NO_MEMORY;
842 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
843 recps[i].root_rid = i;
844 INIT_LIST_HEAD(&recps[i].filt_rules);
845 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
846 INIT_LIST_HEAD(&recps[i].rg_list);
847 ice_init_lock(&recps[i].filt_rule_lock);
856 * ice_aq_get_sw_cfg - get switch configuration
857 * @hw: pointer to the hardware structure
858 * @buf: pointer to the result buffer
859 * @buf_size: length of the buffer available for response
860 * @req_desc: pointer to requested descriptor
861 * @num_elems: pointer to number of elements
862 * @cd: pointer to command details structure or NULL
864 * Get switch configuration (0x0200) to be placed in 'buff'.
865 * This admin command returns information such as initial VSI/port number
866 * and switch ID it belongs to.
868 * NOTE: *req_desc is both an input/output parameter.
869 * The caller of this function first calls this function with *request_desc set
870 * to 0. If the response from f/w has *req_desc set to 0, all the switch
871 * configuration information has been returned; if non-zero (meaning not all
872 * the information was returned), the caller should call this function again
873 * with *req_desc set to the previous value returned by f/w to get the
874 * next block of switch configuration information.
876 * *num_elems is output only parameter. This reflects the number of elements
877 * in response buffer. The caller of this function to use *num_elems while
878 * parsing the response buffer.
880 static enum ice_status
881 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
882 u16 buf_size, u16 *req_desc, u16 *num_elems,
883 struct ice_sq_cd *cd)
885 struct ice_aqc_get_sw_cfg *cmd;
886 enum ice_status status;
887 struct ice_aq_desc desc;
889 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
890 cmd = &desc.params.get_sw_conf;
891 cmd->element = CPU_TO_LE16(*req_desc);
893 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
895 *req_desc = LE16_TO_CPU(cmd->element);
896 *num_elems = LE16_TO_CPU(cmd->num_elems);
903 * ice_alloc_sw - allocate resources specific to switch
904 * @hw: pointer to the HW struct
905 * @ena_stats: true to turn on VEB stats
906 * @shared_res: true for shared resource, false for dedicated resource
907 * @sw_id: switch ID returned
908 * @counter_id: VEB counter ID returned
910 * allocates switch resources (SWID and VEB counter) (0x0208)
913 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
916 struct ice_aqc_alloc_free_res_elem *sw_buf;
917 struct ice_aqc_res_elem *sw_ele;
918 enum ice_status status;
921 buf_len = sizeof(*sw_buf);
922 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
923 ice_malloc(hw, buf_len);
925 return ICE_ERR_NO_MEMORY;
927 /* Prepare buffer for switch ID.
928 * The number of resource entries in buffer is passed as 1 since only a
929 * single switch/VEB instance is allocated, and hence a single sw_id
932 sw_buf->num_elems = CPU_TO_LE16(1);
934 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
935 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
936 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
938 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
939 ice_aqc_opc_alloc_res, NULL);
942 goto ice_alloc_sw_exit;
944 sw_ele = &sw_buf->elem[0];
945 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
948 /* Prepare buffer for VEB Counter */
949 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
950 struct ice_aqc_alloc_free_res_elem *counter_buf;
951 struct ice_aqc_res_elem *counter_ele;
953 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
954 ice_malloc(hw, buf_len);
956 status = ICE_ERR_NO_MEMORY;
957 goto ice_alloc_sw_exit;
960 /* The number of resource entries in buffer is passed as 1 since
961 * only a single switch/VEB instance is allocated, and hence a
962 * single VEB counter is requested.
964 counter_buf->num_elems = CPU_TO_LE16(1);
965 counter_buf->res_type =
966 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
967 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
968 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
972 ice_free(hw, counter_buf);
973 goto ice_alloc_sw_exit;
975 counter_ele = &counter_buf->elem[0];
976 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
977 ice_free(hw, counter_buf);
981 ice_free(hw, sw_buf);
986 * ice_free_sw - free resources specific to switch
987 * @hw: pointer to the HW struct
988 * @sw_id: switch ID returned
989 * @counter_id: VEB counter ID returned
991 * free switch resources (SWID and VEB counter) (0x0209)
993 * NOTE: This function frees multiple resources. It continues
994 * releasing other resources even after it encounters error.
995 * The error code returned is the last error it encountered.
997 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
999 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1000 enum ice_status status, ret_status;
1003 buf_len = sizeof(*sw_buf);
1004 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1005 ice_malloc(hw, buf_len);
1007 return ICE_ERR_NO_MEMORY;
1009 /* Prepare buffer to free for switch ID res.
1010 * The number of resource entries in buffer is passed as 1 since only a
1011 * single switch/VEB instance is freed, and hence a single sw_id
1014 sw_buf->num_elems = CPU_TO_LE16(1);
1015 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1016 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1018 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1019 ice_aqc_opc_free_res, NULL);
1022 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1024 /* Prepare buffer to free for VEB Counter resource */
1025 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1026 ice_malloc(hw, buf_len);
1028 ice_free(hw, sw_buf);
1029 return ICE_ERR_NO_MEMORY;
1032 /* The number of resource entries in buffer is passed as 1 since only a
1033 * single switch/VEB instance is freed, and hence a single VEB counter
1036 counter_buf->num_elems = CPU_TO_LE16(1);
1037 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1038 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1040 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1041 ice_aqc_opc_free_res, NULL);
1043 ice_debug(hw, ICE_DBG_SW,
1044 "VEB counter resource could not be freed\n");
1045 ret_status = status;
1048 ice_free(hw, counter_buf);
1049 ice_free(hw, sw_buf);
1055 * @hw: pointer to the HW struct
1056 * @vsi_ctx: pointer to a VSI context struct
1057 * @cd: pointer to command details structure or NULL
1059 * Add a VSI context to the hardware (0x0210)
1062 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1063 struct ice_sq_cd *cd)
1065 struct ice_aqc_add_update_free_vsi_resp *res;
1066 struct ice_aqc_add_get_update_free_vsi *cmd;
1067 struct ice_aq_desc desc;
1068 enum ice_status status;
1070 cmd = &desc.params.vsi_cmd;
1071 res = &desc.params.add_update_free_vsi_res;
1073 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1075 if (!vsi_ctx->alloc_from_pool)
1076 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1077 ICE_AQ_VSI_IS_VALID);
1079 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1081 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1083 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1084 sizeof(vsi_ctx->info), cd);
1087 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1088 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1089 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1097 * @hw: pointer to the HW struct
1098 * @vsi_ctx: pointer to a VSI context struct
1099 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1100 * @cd: pointer to command details structure or NULL
1102 * Free VSI context info from hardware (0x0213)
1105 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1106 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1108 struct ice_aqc_add_update_free_vsi_resp *resp;
1109 struct ice_aqc_add_get_update_free_vsi *cmd;
1110 struct ice_aq_desc desc;
1111 enum ice_status status;
1113 cmd = &desc.params.vsi_cmd;
1114 resp = &desc.params.add_update_free_vsi_res;
1116 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1118 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1120 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1122 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1124 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1125 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1133 * @hw: pointer to the HW struct
1134 * @vsi_ctx: pointer to a VSI context struct
1135 * @cd: pointer to command details structure or NULL
1137 * Update VSI context in the hardware (0x0211)
1140 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1141 struct ice_sq_cd *cd)
1143 struct ice_aqc_add_update_free_vsi_resp *resp;
1144 struct ice_aqc_add_get_update_free_vsi *cmd;
1145 struct ice_aq_desc desc;
1146 enum ice_status status;
1148 cmd = &desc.params.vsi_cmd;
1149 resp = &desc.params.add_update_free_vsi_res;
1151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1153 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1155 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1157 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1158 sizeof(vsi_ctx->info), cd);
1161 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1162 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1169 * ice_is_vsi_valid - check whether the VSI is valid or not
1170 * @hw: pointer to the HW struct
1171 * @vsi_handle: VSI handle
1173 * check whether the VSI is valid or not
1175 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1177 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1181 * ice_get_hw_vsi_num - return the HW VSI number
1182 * @hw: pointer to the HW struct
1183 * @vsi_handle: VSI handle
1185 * return the HW VSI number
1186 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1188 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1190 return hw->vsi_ctx[vsi_handle]->vsi_num;
1194 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1195 * @hw: pointer to the HW struct
1196 * @vsi_handle: VSI handle
1198 * return the VSI context entry for a given VSI handle
1200 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1202 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1206 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1207 * @hw: pointer to the HW struct
1208 * @vsi_handle: VSI handle
1209 * @vsi: VSI context pointer
1211 * save the VSI context entry for a given VSI handle
1214 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1216 hw->vsi_ctx[vsi_handle] = vsi;
1220 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1221 * @hw: pointer to the HW struct
1222 * @vsi_handle: VSI handle
1224 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1226 struct ice_vsi_ctx *vsi;
1229 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1232 ice_for_each_traffic_class(i) {
1233 if (vsi->lan_q_ctx[i]) {
1234 ice_free(hw, vsi->lan_q_ctx[i]);
1235 vsi->lan_q_ctx[i] = NULL;
1241 * ice_clear_vsi_ctx - clear the VSI context entry
1242 * @hw: pointer to the HW struct
1243 * @vsi_handle: VSI handle
1245 * clear the VSI context entry
1247 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1249 struct ice_vsi_ctx *vsi;
1251 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1253 ice_clear_vsi_q_ctx(hw, vsi_handle);
1255 hw->vsi_ctx[vsi_handle] = NULL;
1260 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1261 * @hw: pointer to the HW struct
1263 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1267 for (i = 0; i < ICE_MAX_VSI; i++)
1268 ice_clear_vsi_ctx(hw, i);
1272 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1273 * @hw: pointer to the HW struct
1274 * @vsi_handle: unique VSI handle provided by drivers
1275 * @vsi_ctx: pointer to a VSI context struct
1276 * @cd: pointer to command details structure or NULL
1278 * Add a VSI context to the hardware also add it into the VSI handle list.
1279 * If this function gets called after reset for existing VSIs then update
1280 * with the new HW VSI number in the corresponding VSI handle list entry.
1283 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1284 struct ice_sq_cd *cd)
1286 struct ice_vsi_ctx *tmp_vsi_ctx;
1287 enum ice_status status;
1289 if (vsi_handle >= ICE_MAX_VSI)
1290 return ICE_ERR_PARAM;
1291 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1294 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1296 /* Create a new VSI context */
1297 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1298 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1300 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1301 return ICE_ERR_NO_MEMORY;
1303 *tmp_vsi_ctx = *vsi_ctx;
1305 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1307 /* update with new HW VSI num */
1308 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1315 * ice_free_vsi- free VSI context from hardware and VSI handle list
1316 * @hw: pointer to the HW struct
1317 * @vsi_handle: unique VSI handle
1318 * @vsi_ctx: pointer to a VSI context struct
1319 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1320 * @cd: pointer to command details structure or NULL
1322 * Free VSI context info from hardware as well as from VSI handle list
1325 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1326 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1328 enum ice_status status;
1330 if (!ice_is_vsi_valid(hw, vsi_handle))
1331 return ICE_ERR_PARAM;
1332 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1333 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1335 ice_clear_vsi_ctx(hw, vsi_handle);
1341 * @hw: pointer to the HW struct
1342 * @vsi_handle: unique VSI handle
1343 * @vsi_ctx: pointer to a VSI context struct
1344 * @cd: pointer to command details structure or NULL
1346 * Update VSI context in the hardware
1349 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1350 struct ice_sq_cd *cd)
1352 if (!ice_is_vsi_valid(hw, vsi_handle))
1353 return ICE_ERR_PARAM;
1354 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1355 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1359 * ice_aq_get_vsi_params
1360 * @hw: pointer to the HW struct
1361 * @vsi_ctx: pointer to a VSI context struct
1362 * @cd: pointer to command details structure or NULL
1364 * Get VSI context info from hardware (0x0212)
1367 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1368 struct ice_sq_cd *cd)
1370 struct ice_aqc_add_get_update_free_vsi *cmd;
1371 struct ice_aqc_get_vsi_resp *resp;
1372 struct ice_aq_desc desc;
1373 enum ice_status status;
1375 cmd = &desc.params.vsi_cmd;
1376 resp = &desc.params.get_vsi_resp;
1378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1380 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1382 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1383 sizeof(vsi_ctx->info), cd);
1385 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1387 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1388 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1395 * ice_aq_add_update_mir_rule - add/update a mirror rule
1396 * @hw: pointer to the HW struct
1397 * @rule_type: Rule Type
1398 * @dest_vsi: VSI number to which packets will be mirrored
1399 * @count: length of the list
1400 * @mr_buf: buffer for list of mirrored VSI numbers
1401 * @cd: pointer to command details structure or NULL
1404 * Add/Update Mirror Rule (0x260).
1407 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1408 u16 count, struct ice_mir_rule_buf *mr_buf,
1409 struct ice_sq_cd *cd, u16 *rule_id)
1411 struct ice_aqc_add_update_mir_rule *cmd;
1412 struct ice_aq_desc desc;
1413 enum ice_status status;
1414 __le16 *mr_list = NULL;
1417 switch (rule_type) {
1418 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1419 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1420 /* Make sure count and mr_buf are set for these rule_types */
1421 if (!(count && mr_buf))
1422 return ICE_ERR_PARAM;
1424 buf_size = count * sizeof(__le16);
1425 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1427 return ICE_ERR_NO_MEMORY;
1429 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1430 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1431 /* Make sure count and mr_buf are not set for these
1434 if (count || mr_buf)
1435 return ICE_ERR_PARAM;
1438 ice_debug(hw, ICE_DBG_SW,
1439 "Error due to unsupported rule_type %u\n", rule_type);
1440 return ICE_ERR_OUT_OF_RANGE;
1443 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1445 /* Pre-process 'mr_buf' items for add/update of virtual port
1446 * ingress/egress mirroring (but not physical port ingress/egress
1452 for (i = 0; i < count; i++) {
1455 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1457 /* Validate specified VSI number, make sure it is less
1458 * than ICE_MAX_VSI, if not return with error.
1460 if (id >= ICE_MAX_VSI) {
1461 ice_debug(hw, ICE_DBG_SW,
1462 "Error VSI index (%u) out-of-range\n",
1464 ice_free(hw, mr_list);
1465 return ICE_ERR_OUT_OF_RANGE;
1468 /* add VSI to mirror rule */
1471 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1472 else /* remove VSI from mirror rule */
1473 mr_list[i] = CPU_TO_LE16(id);
1477 cmd = &desc.params.add_update_rule;
1478 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1479 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1480 ICE_AQC_RULE_ID_VALID_M);
1481 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1482 cmd->num_entries = CPU_TO_LE16(count);
1483 cmd->dest = CPU_TO_LE16(dest_vsi);
1485 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1487 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1489 ice_free(hw, mr_list);
1495 * ice_aq_delete_mir_rule - delete a mirror rule
1496 * @hw: pointer to the HW struct
1497 * @rule_id: Mirror rule ID (to be deleted)
1498 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1499 * otherwise it is returned to the shared pool
1500 * @cd: pointer to command details structure or NULL
1502 * Delete Mirror Rule (0x261).
1505 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1506 struct ice_sq_cd *cd)
1508 struct ice_aqc_delete_mir_rule *cmd;
1509 struct ice_aq_desc desc;
1511 /* rule_id should be in the range 0...63 */
1512 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1513 return ICE_ERR_OUT_OF_RANGE;
1515 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1517 cmd = &desc.params.del_rule;
1518 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1519 cmd->rule_id = CPU_TO_LE16(rule_id);
1522 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1524 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1528 * ice_aq_alloc_free_vsi_list
1529 * @hw: pointer to the HW struct
1530 * @vsi_list_id: VSI list ID returned or used for lookup
1531 * @lkup_type: switch rule filter lookup type
1532 * @opc: switch rules population command type - pass in the command opcode
1534 * allocates or free a VSI list resource
1536 static enum ice_status
1537 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1538 enum ice_sw_lkup_type lkup_type,
1539 enum ice_adminq_opc opc)
1541 struct ice_aqc_alloc_free_res_elem *sw_buf;
1542 struct ice_aqc_res_elem *vsi_ele;
1543 enum ice_status status;
1546 buf_len = sizeof(*sw_buf);
1547 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1548 ice_malloc(hw, buf_len);
1550 return ICE_ERR_NO_MEMORY;
1551 sw_buf->num_elems = CPU_TO_LE16(1);
1553 if (lkup_type == ICE_SW_LKUP_MAC ||
1554 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1555 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1556 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1557 lkup_type == ICE_SW_LKUP_PROMISC ||
1558 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1559 lkup_type == ICE_SW_LKUP_LAST) {
1560 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1561 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1563 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1565 status = ICE_ERR_PARAM;
1566 goto ice_aq_alloc_free_vsi_list_exit;
1569 if (opc == ice_aqc_opc_free_res)
1570 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1572 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1574 goto ice_aq_alloc_free_vsi_list_exit;
1576 if (opc == ice_aqc_opc_alloc_res) {
1577 vsi_ele = &sw_buf->elem[0];
1578 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1581 ice_aq_alloc_free_vsi_list_exit:
1582 ice_free(hw, sw_buf);
1587 * ice_aq_set_storm_ctrl - Sets storm control configuration
1588 * @hw: pointer to the HW struct
1589 * @bcast_thresh: represents the upper threshold for broadcast storm control
1590 * @mcast_thresh: represents the upper threshold for multicast storm control
1591 * @ctl_bitmask: storm control control knobs
1593 * Sets the storm control configuration (0x0280)
1596 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1599 struct ice_aqc_storm_cfg *cmd;
1600 struct ice_aq_desc desc;
1602 cmd = &desc.params.storm_conf;
1604 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1606 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1607 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1608 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1610 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1614 * ice_aq_get_storm_ctrl - gets storm control configuration
1615 * @hw: pointer to the HW struct
1616 * @bcast_thresh: represents the upper threshold for broadcast storm control
1617 * @mcast_thresh: represents the upper threshold for multicast storm control
1618 * @ctl_bitmask: storm control control knobs
1620 * Gets the storm control configuration (0x0281)
1623 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1626 enum ice_status status;
1627 struct ice_aq_desc desc;
1629 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1631 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1633 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1636 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1639 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1642 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1649 * ice_aq_sw_rules - add/update/remove switch rules
1650 * @hw: pointer to the HW struct
1651 * @rule_list: pointer to switch rule population list
1652 * @rule_list_sz: total size of the rule list in bytes
1653 * @num_rules: number of switch rules in the rule_list
1654 * @opc: switch rules population command type - pass in the command opcode
1655 * @cd: pointer to command details structure or NULL
1657 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1659 static enum ice_status
1660 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1661 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1663 struct ice_aq_desc desc;
1665 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1667 if (opc != ice_aqc_opc_add_sw_rules &&
1668 opc != ice_aqc_opc_update_sw_rules &&
1669 opc != ice_aqc_opc_remove_sw_rules)
1670 return ICE_ERR_PARAM;
1672 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1674 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1675 desc.params.sw_rules.num_rules_fltr_entry_index =
1676 CPU_TO_LE16(num_rules);
1677 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1681 * ice_aq_add_recipe - add switch recipe
1682 * @hw: pointer to the HW struct
1683 * @s_recipe_list: pointer to switch rule population list
1684 * @num_recipes: number of switch recipes in the list
1685 * @cd: pointer to command details structure or NULL
1690 ice_aq_add_recipe(struct ice_hw *hw,
1691 struct ice_aqc_recipe_data_elem *s_recipe_list,
1692 u16 num_recipes, struct ice_sq_cd *cd)
1694 struct ice_aqc_add_get_recipe *cmd;
1695 struct ice_aq_desc desc;
1698 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1699 cmd = &desc.params.add_get_recipe;
1700 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1702 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1703 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1705 buf_size = num_recipes * sizeof(*s_recipe_list);
1707 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1711 * ice_aq_get_recipe - get switch recipe
1712 * @hw: pointer to the HW struct
1713 * @s_recipe_list: pointer to switch rule population list
1714 * @num_recipes: pointer to the number of recipes (input and output)
1715 * @recipe_root: root recipe number of recipe(s) to retrieve
1716 * @cd: pointer to command details structure or NULL
1720 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1721 * On output, *num_recipes will equal the number of entries returned in
1724 * The caller must supply enough space in s_recipe_list to hold all possible
1725 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1728 ice_aq_get_recipe(struct ice_hw *hw,
1729 struct ice_aqc_recipe_data_elem *s_recipe_list,
1730 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1732 struct ice_aqc_add_get_recipe *cmd;
1733 struct ice_aq_desc desc;
1734 enum ice_status status;
1737 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1738 return ICE_ERR_PARAM;
1740 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1741 cmd = &desc.params.add_get_recipe;
1742 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1744 cmd->return_index = CPU_TO_LE16(recipe_root);
1745 cmd->num_sub_recipes = 0;
1747 buf_size = *num_recipes * sizeof(*s_recipe_list);
1749 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1750 /* cppcheck-suppress constArgument */
1751 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1757 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1758 * @hw: pointer to the HW struct
1759 * @profile_id: package profile ID to associate the recipe with
1760 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1761 * @cd: pointer to command details structure or NULL
1762 * Recipe to profile association (0x0291)
1765 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1766 struct ice_sq_cd *cd)
1768 struct ice_aqc_recipe_to_profile *cmd;
1769 struct ice_aq_desc desc;
1771 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1772 cmd = &desc.params.recipe_to_profile;
1773 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1774 cmd->profile_id = CPU_TO_LE16(profile_id);
1775 /* Set the recipe ID bit in the bitmask to let the device know which
1776 * profile we are associating the recipe to
1778 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1779 ICE_NONDMA_TO_NONDMA);
1781 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1785 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1786 * @hw: pointer to the HW struct
1787 * @profile_id: package profile ID to associate the recipe with
1788 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1789 * @cd: pointer to command details structure or NULL
1790 * Associate profile ID with given recipe (0x0293)
1793 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1794 struct ice_sq_cd *cd)
1796 struct ice_aqc_recipe_to_profile *cmd;
1797 struct ice_aq_desc desc;
1798 enum ice_status status;
1800 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1801 cmd = &desc.params.recipe_to_profile;
1802 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1803 cmd->profile_id = CPU_TO_LE16(profile_id);
1805 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1807 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1808 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1814 * ice_alloc_recipe - add recipe resource
1815 * @hw: pointer to the hardware structure
1816 * @rid: recipe ID returned as response to AQ call
1818 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1820 struct ice_aqc_alloc_free_res_elem *sw_buf;
1821 enum ice_status status;
1824 buf_len = sizeof(*sw_buf);
1825 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1827 return ICE_ERR_NO_MEMORY;
1829 sw_buf->num_elems = CPU_TO_LE16(1);
1830 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1831 ICE_AQC_RES_TYPE_S) |
1832 ICE_AQC_RES_TYPE_FLAG_SHARED);
1833 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1834 ice_aqc_opc_alloc_res, NULL);
1836 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1837 ice_free(hw, sw_buf);
1842 /* ice_init_port_info - Initialize port_info with switch configuration data
1843 * @pi: pointer to port_info
1844 * @vsi_port_num: VSI number or port number
1845 * @type: Type of switch element (port or VSI)
1846 * @swid: switch ID of the switch the element is attached to
1847 * @pf_vf_num: PF or VF number
1848 * @is_vf: true if the element is a VF, false otherwise
1851 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1852 u16 swid, u16 pf_vf_num, bool is_vf)
1855 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1856 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1858 pi->pf_vf_num = pf_vf_num;
1860 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1861 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1864 ice_debug(pi->hw, ICE_DBG_SW,
1865 "incorrect VSI/port type received\n");
1870 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1871 * @hw: pointer to the hardware structure
1873 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1875 struct ice_aqc_get_sw_cfg_resp *rbuf;
1876 enum ice_status status;
1883 num_total_ports = 1;
1885 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1886 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1889 return ICE_ERR_NO_MEMORY;
1891 /* Multiple calls to ice_aq_get_sw_cfg may be required
1892 * to get all the switch configuration information. The need
1893 * for additional calls is indicated by ice_aq_get_sw_cfg
1894 * writing a non-zero value in req_desc
1897 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1898 &req_desc, &num_elems, NULL);
1903 for (i = 0; i < num_elems; i++) {
1904 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1905 u16 pf_vf_num, swid, vsi_port_num;
1909 ele = rbuf[i].elements;
1910 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1911 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1913 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1914 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1916 swid = LE16_TO_CPU(ele->swid);
1918 if (LE16_TO_CPU(ele->pf_vf_num) &
1919 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1922 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1923 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1926 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1927 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1928 if (j == num_total_ports) {
1929 ice_debug(hw, ICE_DBG_SW,
1930 "more ports than expected\n");
1931 status = ICE_ERR_CFG;
1934 ice_init_port_info(hw->port_info,
1935 vsi_port_num, res_type, swid,
1943 } while (req_desc && !status);
1946 ice_free(hw, (void *)rbuf);
1951 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1952 * @hw: pointer to the hardware structure
1953 * @fi: filter info structure to fill/update
1955 * This helper function populates the lb_en and lan_en elements of the provided
1956 * ice_fltr_info struct using the switch's type and characteristics of the
1957 * switch rule being configured.
1959 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1963 if ((fi->flag & ICE_FLTR_TX) &&
1964 (fi->fltr_act == ICE_FWD_TO_VSI ||
1965 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1966 fi->fltr_act == ICE_FWD_TO_Q ||
1967 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1968 /* Setting LB for prune actions will result in replicated
1969 * packets to the internal switch that will be dropped.
1971 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1974 /* Set lan_en to TRUE if
1975 * 1. The switch is a VEB AND
1977 * 2.1 The lookup is a directional lookup like ethertype,
1978 * promiscuous, ethertype-MAC, promiscuous-VLAN
1979 * and default-port OR
1980 * 2.2 The lookup is VLAN, OR
1981 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1982 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1986 * The switch is a VEPA.
1988 * In all other cases, the LAN enable has to be set to false.
1991 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1992 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1993 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1994 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1995 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1996 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1997 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1998 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1999 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2000 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2009 * ice_fill_sw_rule - Helper function to fill switch rule structure
2010 * @hw: pointer to the hardware structure
2011 * @f_info: entry containing packet forwarding information
2012 * @s_rule: switch rule structure to be filled in based on mac_entry
2013 * @opc: switch rules population command type - pass in the command opcode
2016 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2017 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2019 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2027 if (opc == ice_aqc_opc_remove_sw_rules) {
2028 s_rule->pdata.lkup_tx_rx.act = 0;
2029 s_rule->pdata.lkup_tx_rx.index =
2030 CPU_TO_LE16(f_info->fltr_rule_id);
2031 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2035 eth_hdr_sz = sizeof(dummy_eth_header);
2036 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2038 /* initialize the ether header with a dummy header */
2039 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2040 ice_fill_sw_info(hw, f_info);
2042 switch (f_info->fltr_act) {
2043 case ICE_FWD_TO_VSI:
2044 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2045 ICE_SINGLE_ACT_VSI_ID_M;
2046 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2047 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2048 ICE_SINGLE_ACT_VALID_BIT;
2050 case ICE_FWD_TO_VSI_LIST:
2051 act |= ICE_SINGLE_ACT_VSI_LIST;
2052 act |= (f_info->fwd_id.vsi_list_id <<
2053 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2054 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2055 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2056 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2057 ICE_SINGLE_ACT_VALID_BIT;
2060 act |= ICE_SINGLE_ACT_TO_Q;
2061 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2062 ICE_SINGLE_ACT_Q_INDEX_M;
2064 case ICE_DROP_PACKET:
2065 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2066 ICE_SINGLE_ACT_VALID_BIT;
2068 case ICE_FWD_TO_QGRP:
2069 q_rgn = f_info->qgrp_size > 0 ?
2070 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2071 act |= ICE_SINGLE_ACT_TO_Q;
2072 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2073 ICE_SINGLE_ACT_Q_INDEX_M;
2074 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2075 ICE_SINGLE_ACT_Q_REGION_M;
2082 act |= ICE_SINGLE_ACT_LB_ENABLE;
2084 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2086 switch (f_info->lkup_type) {
2087 case ICE_SW_LKUP_MAC:
2088 daddr = f_info->l_data.mac.mac_addr;
2090 case ICE_SW_LKUP_VLAN:
2091 vlan_id = f_info->l_data.vlan.vlan_id;
2092 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2093 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2094 act |= ICE_SINGLE_ACT_PRUNE;
2095 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2098 case ICE_SW_LKUP_ETHERTYPE_MAC:
2099 daddr = f_info->l_data.ethertype_mac.mac_addr;
2101 case ICE_SW_LKUP_ETHERTYPE:
2102 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2103 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2105 case ICE_SW_LKUP_MAC_VLAN:
2106 daddr = f_info->l_data.mac_vlan.mac_addr;
2107 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2109 case ICE_SW_LKUP_PROMISC_VLAN:
2110 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2112 case ICE_SW_LKUP_PROMISC:
2113 daddr = f_info->l_data.mac_vlan.mac_addr;
2119 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2120 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2121 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2123 /* Recipe set depending on lookup type */
2124 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2125 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2126 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2129 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2130 ICE_NONDMA_TO_NONDMA);
2132 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2133 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2134 *off = CPU_TO_BE16(vlan_id);
2137 /* Create the switch rule with the final dummy Ethernet header */
2138 if (opc != ice_aqc_opc_update_sw_rules)
2139 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2143 * ice_add_marker_act
2144 * @hw: pointer to the hardware structure
2145 * @m_ent: the management entry for which sw marker needs to be added
2146 * @sw_marker: sw marker to tag the Rx descriptor with
2147 * @l_id: large action resource ID
2149 * Create a large action to hold software marker and update the switch rule
2150 * entry pointed by m_ent with newly created large action
2152 static enum ice_status
2153 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2154 u16 sw_marker, u16 l_id)
2156 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2157 /* For software marker we need 3 large actions
2158 * 1. FWD action: FWD TO VSI or VSI LIST
2159 * 2. GENERIC VALUE action to hold the profile ID
2160 * 3. GENERIC VALUE action to hold the software marker ID
2162 const u16 num_lg_acts = 3;
2163 enum ice_status status;
2169 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2170 return ICE_ERR_PARAM;
2172 /* Create two back-to-back switch rules and submit them to the HW using
2173 * one memory buffer:
2177 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2178 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2179 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2181 return ICE_ERR_NO_MEMORY;
2183 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2185 /* Fill in the first switch rule i.e. large action */
2186 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2187 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2188 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2190 /* First action VSI forwarding or VSI list forwarding depending on how
2193 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2194 m_ent->fltr_info.fwd_id.hw_vsi_id;
2196 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2197 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2198 ICE_LG_ACT_VSI_LIST_ID_M;
2199 if (m_ent->vsi_count > 1)
2200 act |= ICE_LG_ACT_VSI_LIST;
2201 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2203 /* Second action descriptor type */
2204 act = ICE_LG_ACT_GENERIC;
2206 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2207 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2209 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2210 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2212 /* Third action Marker value */
2213 act |= ICE_LG_ACT_GENERIC;
2214 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2215 ICE_LG_ACT_GENERIC_VALUE_M;
2217 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2219 /* call the fill switch rule to fill the lookup Tx Rx structure */
2220 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2221 ice_aqc_opc_update_sw_rules);
2223 /* Update the action to point to the large action ID */
2224 rx_tx->pdata.lkup_tx_rx.act =
2225 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2226 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2227 ICE_SINGLE_ACT_PTR_VAL_M));
2229 /* Use the filter rule ID of the previously created rule with single
2230 * act. Once the update happens, hardware will treat this as large
2233 rx_tx->pdata.lkup_tx_rx.index =
2234 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2236 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2237 ice_aqc_opc_update_sw_rules, NULL);
2239 m_ent->lg_act_idx = l_id;
2240 m_ent->sw_marker_id = sw_marker;
2243 ice_free(hw, lg_act);
2248 * ice_add_counter_act - add/update filter rule with counter action
2249 * @hw: pointer to the hardware structure
2250 * @m_ent: the management entry for which counter needs to be added
2251 * @counter_id: VLAN counter ID returned as part of allocate resource
2252 * @l_id: large action resource ID
2254 static enum ice_status
2255 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2256 u16 counter_id, u16 l_id)
2258 struct ice_aqc_sw_rules_elem *lg_act;
2259 struct ice_aqc_sw_rules_elem *rx_tx;
2260 enum ice_status status;
2261 /* 2 actions will be added while adding a large action counter */
2262 const int num_acts = 2;
2269 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2270 return ICE_ERR_PARAM;
2272 /* Create two back-to-back switch rules and submit them to the HW using
2273 * one memory buffer:
2277 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2278 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2279 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2282 return ICE_ERR_NO_MEMORY;
2284 rx_tx = (struct ice_aqc_sw_rules_elem *)
2285 ((u8 *)lg_act + lg_act_size);
2287 /* Fill in the first switch rule i.e. large action */
2288 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2289 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2290 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2292 /* First action VSI forwarding or VSI list forwarding depending on how
2295 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2296 m_ent->fltr_info.fwd_id.hw_vsi_id;
2298 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2299 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2300 ICE_LG_ACT_VSI_LIST_ID_M;
2301 if (m_ent->vsi_count > 1)
2302 act |= ICE_LG_ACT_VSI_LIST;
2303 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2305 /* Second action counter ID */
2306 act = ICE_LG_ACT_STAT_COUNT;
2307 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2308 ICE_LG_ACT_STAT_COUNT_M;
2309 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2311 /* call the fill switch rule to fill the lookup Tx Rx structure */
2312 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2313 ice_aqc_opc_update_sw_rules);
2315 act = ICE_SINGLE_ACT_PTR;
2316 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2317 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2319 /* Use the filter rule ID of the previously created rule with single
2320 * act. Once the update happens, hardware will treat this as large
2323 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2324 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2326 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2327 ice_aqc_opc_update_sw_rules, NULL);
2329 m_ent->lg_act_idx = l_id;
2330 m_ent->counter_index = counter_id;
2333 ice_free(hw, lg_act);
2338 * ice_create_vsi_list_map
2339 * @hw: pointer to the hardware structure
2340 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2341 * @num_vsi: number of VSI handles in the array
2342 * @vsi_list_id: VSI list ID generated as part of allocate resource
2344 * Helper function to create a new entry of VSI list ID to VSI mapping
2345 * using the given VSI list ID
2347 static struct ice_vsi_list_map_info *
2348 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2351 struct ice_switch_info *sw = hw->switch_info;
2352 struct ice_vsi_list_map_info *v_map;
2355 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2360 v_map->vsi_list_id = vsi_list_id;
2362 for (i = 0; i < num_vsi; i++)
2363 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2365 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2370 * ice_update_vsi_list_rule
2371 * @hw: pointer to the hardware structure
2372 * @vsi_handle_arr: array of VSI handles to form a VSI list
2373 * @num_vsi: number of VSI handles in the array
2374 * @vsi_list_id: VSI list ID generated as part of allocate resource
2375 * @remove: Boolean value to indicate if this is a remove action
2376 * @opc: switch rules population command type - pass in the command opcode
2377 * @lkup_type: lookup type of the filter
2379 * Call AQ command to add a new switch rule or update existing switch rule
2380 * using the given VSI list ID
2382 static enum ice_status
2383 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2384 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2385 enum ice_sw_lkup_type lkup_type)
2387 struct ice_aqc_sw_rules_elem *s_rule;
2388 enum ice_status status;
2394 return ICE_ERR_PARAM;
2396 if (lkup_type == ICE_SW_LKUP_MAC ||
2397 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2398 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2399 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2400 lkup_type == ICE_SW_LKUP_PROMISC ||
2401 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2402 lkup_type == ICE_SW_LKUP_LAST)
2403 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2404 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2405 else if (lkup_type == ICE_SW_LKUP_VLAN)
2406 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2407 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2409 return ICE_ERR_PARAM;
2411 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2412 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2414 return ICE_ERR_NO_MEMORY;
2415 for (i = 0; i < num_vsi; i++) {
2416 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2417 status = ICE_ERR_PARAM;
2420 /* AQ call requires hw_vsi_id(s) */
2421 s_rule->pdata.vsi_list.vsi[i] =
2422 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2425 s_rule->type = CPU_TO_LE16(rule_type);
2426 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2427 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2429 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2432 ice_free(hw, s_rule);
2437 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2438 * @hw: pointer to the HW struct
2439 * @vsi_handle_arr: array of VSI handles to form a VSI list
2440 * @num_vsi: number of VSI handles in the array
2441 * @vsi_list_id: stores the ID of the VSI list to be created
2442 * @lkup_type: switch rule filter's lookup type
2444 static enum ice_status
2445 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2446 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2448 enum ice_status status;
2450 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2451 ice_aqc_opc_alloc_res);
2455 /* Update the newly created VSI list to include the specified VSIs */
2456 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2457 *vsi_list_id, false,
2458 ice_aqc_opc_add_sw_rules, lkup_type);
2462 * ice_create_pkt_fwd_rule
2463 * @hw: pointer to the hardware structure
2464 * @recp_list: corresponding filter management list
2465 * @f_entry: entry containing packet forwarding information
2467 * Create switch rule with given filter information and add an entry
2468 * to the corresponding filter management list to track this switch rule
2471 static enum ice_status
2472 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2473 struct ice_fltr_list_entry *f_entry)
2475 struct ice_fltr_mgmt_list_entry *fm_entry;
2476 struct ice_aqc_sw_rules_elem *s_rule;
2477 enum ice_status status;
2479 s_rule = (struct ice_aqc_sw_rules_elem *)
2480 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2482 return ICE_ERR_NO_MEMORY;
2483 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2484 ice_malloc(hw, sizeof(*fm_entry));
2486 status = ICE_ERR_NO_MEMORY;
2487 goto ice_create_pkt_fwd_rule_exit;
2490 fm_entry->fltr_info = f_entry->fltr_info;
2492 /* Initialize all the fields for the management entry */
2493 fm_entry->vsi_count = 1;
2494 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2495 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2496 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2498 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2499 ice_aqc_opc_add_sw_rules);
2501 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2502 ice_aqc_opc_add_sw_rules, NULL);
2504 ice_free(hw, fm_entry);
2505 goto ice_create_pkt_fwd_rule_exit;
2508 f_entry->fltr_info.fltr_rule_id =
2509 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2510 fm_entry->fltr_info.fltr_rule_id =
2511 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2513 /* The book keeping entries will get removed when base driver
2514 * calls remove filter AQ command
2516 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2518 ice_create_pkt_fwd_rule_exit:
2519 ice_free(hw, s_rule);
2524 * ice_update_pkt_fwd_rule
2525 * @hw: pointer to the hardware structure
2526 * @f_info: filter information for switch rule
2528 * Call AQ command to update a previously created switch rule with a
2531 static enum ice_status
2532 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2534 struct ice_aqc_sw_rules_elem *s_rule;
2535 enum ice_status status;
2537 s_rule = (struct ice_aqc_sw_rules_elem *)
2538 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2540 return ICE_ERR_NO_MEMORY;
2542 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2544 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2546 /* Update switch rule with new rule set to forward VSI list */
2547 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2548 ice_aqc_opc_update_sw_rules, NULL);
2550 ice_free(hw, s_rule);
2555 * ice_update_sw_rule_bridge_mode
2556 * @hw: pointer to the HW struct
2558 * Updates unicast switch filter rules based on VEB/VEPA mode
2560 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2562 struct ice_switch_info *sw = hw->switch_info;
2563 struct ice_fltr_mgmt_list_entry *fm_entry;
2564 enum ice_status status = ICE_SUCCESS;
2565 struct LIST_HEAD_TYPE *rule_head;
2566 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2568 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2569 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2571 ice_acquire_lock(rule_lock);
2572 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2574 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2575 u8 *addr = fi->l_data.mac.mac_addr;
2577 /* Update unicast Tx rules to reflect the selected
2580 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2581 (fi->fltr_act == ICE_FWD_TO_VSI ||
2582 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2583 fi->fltr_act == ICE_FWD_TO_Q ||
2584 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2585 status = ice_update_pkt_fwd_rule(hw, fi);
2591 ice_release_lock(rule_lock);
2597 * ice_add_update_vsi_list
2598 * @hw: pointer to the hardware structure
2599 * @m_entry: pointer to current filter management list entry
2600 * @cur_fltr: filter information from the book keeping entry
2601 * @new_fltr: filter information with the new VSI to be added
2603 * Call AQ command to add or update previously created VSI list with new VSI.
2605 * Helper function to do book keeping associated with adding filter information
2606 * The algorithm to do the book keeping is described below :
2607 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2608 * if only one VSI has been added till now
2609 * Allocate a new VSI list and add two VSIs
2610 * to this list using switch rule command
2611 * Update the previously created switch rule with the
2612 * newly created VSI list ID
2613 * if a VSI list was previously created
2614 * Add the new VSI to the previously created VSI list set
2615 * using the update switch rule command
2617 static enum ice_status
2618 ice_add_update_vsi_list(struct ice_hw *hw,
2619 struct ice_fltr_mgmt_list_entry *m_entry,
2620 struct ice_fltr_info *cur_fltr,
2621 struct ice_fltr_info *new_fltr)
2623 enum ice_status status = ICE_SUCCESS;
2624 u16 vsi_list_id = 0;
2626 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2627 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2628 return ICE_ERR_NOT_IMPL;
2630 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2631 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2632 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2633 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2634 return ICE_ERR_NOT_IMPL;
2636 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2637 /* Only one entry existed in the mapping and it was not already
2638 * a part of a VSI list. So, create a VSI list with the old and
2641 struct ice_fltr_info tmp_fltr;
2642 u16 vsi_handle_arr[2];
2644 /* A rule already exists with the new VSI being added */
2645 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2646 return ICE_ERR_ALREADY_EXISTS;
2648 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2649 vsi_handle_arr[1] = new_fltr->vsi_handle;
2650 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2652 new_fltr->lkup_type);
2656 tmp_fltr = *new_fltr;
2657 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2658 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2659 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2660 /* Update the previous switch rule of "MAC forward to VSI" to
2661 * "MAC fwd to VSI list"
2663 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2667 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2668 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2669 m_entry->vsi_list_info =
2670 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2673 /* If this entry was large action then the large action needs
2674 * to be updated to point to FWD to VSI list
2676 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2678 ice_add_marker_act(hw, m_entry,
2679 m_entry->sw_marker_id,
2680 m_entry->lg_act_idx);
2682 u16 vsi_handle = new_fltr->vsi_handle;
2683 enum ice_adminq_opc opcode;
2685 if (!m_entry->vsi_list_info)
2688 /* A rule already exists with the new VSI being added */
2689 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2692 /* Update the previously created VSI list set with
2693 * the new VSI ID passed in
2695 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2696 opcode = ice_aqc_opc_update_sw_rules;
2698 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2699 vsi_list_id, false, opcode,
2700 new_fltr->lkup_type);
2701 /* update VSI list mapping info with new VSI ID */
2703 ice_set_bit(vsi_handle,
2704 m_entry->vsi_list_info->vsi_map);
2707 m_entry->vsi_count++;
2712 * ice_find_rule_entry - Search a rule entry
2713 * @list_head: head of rule list
2714 * @f_info: rule information
2716 * Helper function to search for a given rule entry
2717 * Returns pointer to entry storing the rule if found
2719 static struct ice_fltr_mgmt_list_entry *
2720 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2721 struct ice_fltr_info *f_info)
2723 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2725 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2727 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2728 sizeof(f_info->l_data)) &&
2729 f_info->flag == list_itr->fltr_info.flag) {
2738 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2739 * @recp_list: VSI lists needs to be searched
2740 * @vsi_handle: VSI handle to be found in VSI list
2741 * @vsi_list_id: VSI list ID found containing vsi_handle
2743 * Helper function to search a VSI list with single entry containing given VSI
2744 * handle element. This can be extended further to search VSI list with more
2745 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2747 static struct ice_vsi_list_map_info *
2748 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2751 struct ice_vsi_list_map_info *map_info = NULL;
2752 struct LIST_HEAD_TYPE *list_head;
2754 list_head = &recp_list->filt_rules;
2755 if (recp_list->adv_rule) {
2756 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2758 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2759 ice_adv_fltr_mgmt_list_entry,
2761 if (list_itr->vsi_list_info) {
2762 map_info = list_itr->vsi_list_info;
2763 if (ice_is_bit_set(map_info->vsi_map,
2765 *vsi_list_id = map_info->vsi_list_id;
2771 struct ice_fltr_mgmt_list_entry *list_itr;
2773 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2774 ice_fltr_mgmt_list_entry,
2776 if (list_itr->vsi_count == 1 &&
2777 list_itr->vsi_list_info) {
2778 map_info = list_itr->vsi_list_info;
2779 if (ice_is_bit_set(map_info->vsi_map,
2781 *vsi_list_id = map_info->vsi_list_id;
2791 * ice_add_rule_internal - add rule for a given lookup type
2792 * @hw: pointer to the hardware structure
2793 * @recp_list: recipe list for which rule has to be added
2794 * @lport: logic port number on which function add rule
2795 * @f_entry: structure containing MAC forwarding information
2797 * Adds or updates the rule lists for a given recipe
2799 static enum ice_status
2800 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2801 u8 lport, struct ice_fltr_list_entry *f_entry)
2803 struct ice_fltr_info *new_fltr, *cur_fltr;
2804 struct ice_fltr_mgmt_list_entry *m_entry;
2805 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2806 enum ice_status status = ICE_SUCCESS;
2808 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2809 return ICE_ERR_PARAM;
2811 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2812 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2813 f_entry->fltr_info.fwd_id.hw_vsi_id =
2814 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2816 rule_lock = &recp_list->filt_rule_lock;
2818 ice_acquire_lock(rule_lock);
2819 new_fltr = &f_entry->fltr_info;
2820 if (new_fltr->flag & ICE_FLTR_RX)
2821 new_fltr->src = lport;
2822 else if (new_fltr->flag & ICE_FLTR_TX)
2824 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2826 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2828 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2829 goto exit_add_rule_internal;
2832 cur_fltr = &m_entry->fltr_info;
2833 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2835 exit_add_rule_internal:
2836 ice_release_lock(rule_lock);
2841 * ice_remove_vsi_list_rule
2842 * @hw: pointer to the hardware structure
2843 * @vsi_list_id: VSI list ID generated as part of allocate resource
2844 * @lkup_type: switch rule filter lookup type
2846 * The VSI list should be emptied before this function is called to remove the
2849 static enum ice_status
2850 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2851 enum ice_sw_lkup_type lkup_type)
2853 struct ice_aqc_sw_rules_elem *s_rule;
2854 enum ice_status status;
2857 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2858 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2860 return ICE_ERR_NO_MEMORY;
2862 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2863 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2865 /* Free the vsi_list resource that we allocated. It is assumed that the
2866 * list is empty at this point.
2868 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2869 ice_aqc_opc_free_res);
2871 ice_free(hw, s_rule);
2876 * ice_rem_update_vsi_list
2877 * @hw: pointer to the hardware structure
2878 * @vsi_handle: VSI handle of the VSI to remove
2879 * @fm_list: filter management entry for which the VSI list management needs to
2882 static enum ice_status
2883 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2884 struct ice_fltr_mgmt_list_entry *fm_list)
2886 enum ice_sw_lkup_type lkup_type;
2887 enum ice_status status = ICE_SUCCESS;
2890 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2891 fm_list->vsi_count == 0)
2892 return ICE_ERR_PARAM;
2894 /* A rule with the VSI being removed does not exist */
2895 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2896 return ICE_ERR_DOES_NOT_EXIST;
2898 lkup_type = fm_list->fltr_info.lkup_type;
2899 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2900 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2901 ice_aqc_opc_update_sw_rules,
2906 fm_list->vsi_count--;
2907 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2909 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2910 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2911 struct ice_vsi_list_map_info *vsi_list_info =
2912 fm_list->vsi_list_info;
2915 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2917 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2918 return ICE_ERR_OUT_OF_RANGE;
2920 /* Make sure VSI list is empty before removing it below */
2921 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2923 ice_aqc_opc_update_sw_rules,
2928 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2929 tmp_fltr_info.fwd_id.hw_vsi_id =
2930 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2931 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2932 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2934 ice_debug(hw, ICE_DBG_SW,
2935 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2936 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2940 fm_list->fltr_info = tmp_fltr_info;
2943 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2944 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2945 struct ice_vsi_list_map_info *vsi_list_info =
2946 fm_list->vsi_list_info;
2948 /* Remove the VSI list since it is no longer used */
2949 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2951 ice_debug(hw, ICE_DBG_SW,
2952 "Failed to remove VSI list %d, error %d\n",
2953 vsi_list_id, status);
2957 LIST_DEL(&vsi_list_info->list_entry);
2958 ice_free(hw, vsi_list_info);
2959 fm_list->vsi_list_info = NULL;
2966 * ice_remove_rule_internal - Remove a filter rule of a given type
2968 * @hw: pointer to the hardware structure
2969 * @recp_list: recipe list for which the rule needs to removed
2970 * @f_entry: rule entry containing filter information
2972 static enum ice_status
2973 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2974 struct ice_fltr_list_entry *f_entry)
2976 struct ice_fltr_mgmt_list_entry *list_elem;
2977 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2978 enum ice_status status = ICE_SUCCESS;
2979 bool remove_rule = false;
2982 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2983 return ICE_ERR_PARAM;
2984 f_entry->fltr_info.fwd_id.hw_vsi_id =
2985 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2987 rule_lock = &recp_list->filt_rule_lock;
2988 ice_acquire_lock(rule_lock);
2989 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2990 &f_entry->fltr_info);
2992 status = ICE_ERR_DOES_NOT_EXIST;
2996 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2998 } else if (!list_elem->vsi_list_info) {
2999 status = ICE_ERR_DOES_NOT_EXIST;
3001 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3002 /* a ref_cnt > 1 indicates that the vsi_list is being
3003 * shared by multiple rules. Decrement the ref_cnt and
3004 * remove this rule, but do not modify the list, as it
3005 * is in-use by other rules.
3007 list_elem->vsi_list_info->ref_cnt--;
3010 /* a ref_cnt of 1 indicates the vsi_list is only used
3011 * by one rule. However, the original removal request is only
3012 * for a single VSI. Update the vsi_list first, and only
3013 * remove the rule if there are no further VSIs in this list.
3015 vsi_handle = f_entry->fltr_info.vsi_handle;
3016 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3019 /* if VSI count goes to zero after updating the VSI list */
3020 if (list_elem->vsi_count == 0)
3025 /* Remove the lookup rule */
3026 struct ice_aqc_sw_rules_elem *s_rule;
3028 s_rule = (struct ice_aqc_sw_rules_elem *)
3029 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3031 status = ICE_ERR_NO_MEMORY;
3035 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3036 ice_aqc_opc_remove_sw_rules);
3038 status = ice_aq_sw_rules(hw, s_rule,
3039 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3040 ice_aqc_opc_remove_sw_rules, NULL);
3042 /* Remove a book keeping from the list */
3043 ice_free(hw, s_rule);
3048 LIST_DEL(&list_elem->list_entry);
3049 ice_free(hw, list_elem);
3052 ice_release_lock(rule_lock);
3057 * ice_aq_get_res_alloc - get allocated resources
3058 * @hw: pointer to the HW struct
3059 * @num_entries: pointer to u16 to store the number of resource entries returned
3060 * @buf: pointer to user-supplied buffer
3061 * @buf_size: size of buff
3062 * @cd: pointer to command details structure or NULL
3064 * The user-supplied buffer must be large enough to store the resource
3065 * information for all resource types. Each resource type is an
3066 * ice_aqc_get_res_resp_data_elem structure.
3069 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3070 u16 buf_size, struct ice_sq_cd *cd)
3072 struct ice_aqc_get_res_alloc *resp;
3073 enum ice_status status;
3074 struct ice_aq_desc desc;
3077 return ICE_ERR_BAD_PTR;
3079 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3080 return ICE_ERR_INVAL_SIZE;
3082 resp = &desc.params.get_res;
3084 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3085 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3087 if (!status && num_entries)
3088 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3094 * ice_aq_get_res_descs - get allocated resource descriptors
3095 * @hw: pointer to the hardware structure
3096 * @num_entries: number of resource entries in buffer
3097 * @buf: Indirect buffer to hold data parameters and response
3098 * @buf_size: size of buffer for indirect commands
3099 * @res_type: resource type
3100 * @res_shared: is resource shared
3101 * @desc_id: input - first desc ID to start; output - next desc ID
3102 * @cd: pointer to command details structure or NULL
3105 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3106 struct ice_aqc_get_allocd_res_desc_resp *buf,
3107 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3108 struct ice_sq_cd *cd)
3110 struct ice_aqc_get_allocd_res_desc *cmd;
3111 struct ice_aq_desc desc;
3112 enum ice_status status;
3114 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3116 cmd = &desc.params.get_res_desc;
3119 return ICE_ERR_PARAM;
3121 if (buf_size != (num_entries * sizeof(*buf)))
3122 return ICE_ERR_PARAM;
3124 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3126 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3127 ICE_AQC_RES_TYPE_M) | (res_shared ?
3128 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3129 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3131 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3133 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3139 * ice_add_mac_rule - Add a MAC address based filter rule
3140 * @hw: pointer to the hardware structure
3141 * @m_list: list of MAC addresses and forwarding information
3142 * @sw: pointer to switch info struct for which function add rule
3143 * @lport: logic port number on which function add rule
3145 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3146 * multiple unicast addresses, the function assumes that all the
3147 * addresses are unique in a given add_mac call. It doesn't
3148 * check for duplicates in this case, removing duplicates from a given
3149 * list should be taken care of in the caller of this function.
3151 static enum ice_status
3152 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3153 struct ice_switch_info *sw, u8 lport)
3155 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3156 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3157 struct ice_fltr_list_entry *m_list_itr;
3158 struct LIST_HEAD_TYPE *rule_head;
3159 u16 total_elem_left, s_rule_size;
3160 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3161 enum ice_status status = ICE_SUCCESS;
3162 u16 num_unicast = 0;
3166 rule_lock = &recp_list->filt_rule_lock;
3167 rule_head = &recp_list->filt_rules;
3169 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3171 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3175 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3176 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3177 if (!ice_is_vsi_valid(hw, vsi_handle))
3178 return ICE_ERR_PARAM;
3179 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3180 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3181 /* update the src in case it is VSI num */
3182 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3183 return ICE_ERR_PARAM;
3184 m_list_itr->fltr_info.src = hw_vsi_id;
3185 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3186 IS_ZERO_ETHER_ADDR(add))
3187 return ICE_ERR_PARAM;
3188 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3189 /* Don't overwrite the unicast address */
3190 ice_acquire_lock(rule_lock);
3191 if (ice_find_rule_entry(rule_head,
3192 &m_list_itr->fltr_info)) {
3193 ice_release_lock(rule_lock);
3194 return ICE_ERR_ALREADY_EXISTS;
3196 ice_release_lock(rule_lock);
3198 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3199 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3200 m_list_itr->status =
3201 ice_add_rule_internal(hw, recp_list, lport,
3203 if (m_list_itr->status)
3204 return m_list_itr->status;
3208 ice_acquire_lock(rule_lock);
3209 /* Exit if no suitable entries were found for adding bulk switch rule */
3211 status = ICE_SUCCESS;
3212 goto ice_add_mac_exit;
3215 /* Allocate switch rule buffer for the bulk update for unicast */
3216 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3217 s_rule = (struct ice_aqc_sw_rules_elem *)
3218 ice_calloc(hw, num_unicast, s_rule_size);
3220 status = ICE_ERR_NO_MEMORY;
3221 goto ice_add_mac_exit;
3225 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3227 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3228 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3230 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3231 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3232 ice_aqc_opc_add_sw_rules);
3233 r_iter = (struct ice_aqc_sw_rules_elem *)
3234 ((u8 *)r_iter + s_rule_size);
3238 /* Call AQ bulk switch rule update for all unicast addresses */
3240 /* Call AQ switch rule in AQ_MAX chunk */
3241 for (total_elem_left = num_unicast; total_elem_left > 0;
3242 total_elem_left -= elem_sent) {
3243 struct ice_aqc_sw_rules_elem *entry = r_iter;
3245 elem_sent = MIN_T(u8, total_elem_left,
3246 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3247 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3248 elem_sent, ice_aqc_opc_add_sw_rules,
3251 goto ice_add_mac_exit;
3252 r_iter = (struct ice_aqc_sw_rules_elem *)
3253 ((u8 *)r_iter + (elem_sent * s_rule_size));
3256 /* Fill up rule ID based on the value returned from FW */
3258 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3260 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3261 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3262 struct ice_fltr_mgmt_list_entry *fm_entry;
3264 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3265 f_info->fltr_rule_id =
3266 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3267 f_info->fltr_act = ICE_FWD_TO_VSI;
3268 /* Create an entry to track this MAC address */
3269 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3270 ice_malloc(hw, sizeof(*fm_entry));
3272 status = ICE_ERR_NO_MEMORY;
3273 goto ice_add_mac_exit;
3275 fm_entry->fltr_info = *f_info;
3276 fm_entry->vsi_count = 1;
3277 /* The book keeping entries will get removed when
3278 * base driver calls remove filter AQ command
3281 LIST_ADD(&fm_entry->list_entry, rule_head);
3282 r_iter = (struct ice_aqc_sw_rules_elem *)
3283 ((u8 *)r_iter + s_rule_size);
3288 ice_release_lock(rule_lock);
3290 ice_free(hw, s_rule);
3295 * ice_add_mac - Add a MAC address based filter rule
3296 * @hw: pointer to the hardware structure
3297 * @m_list: list of MAC addresses and forwarding information
3299 * Function add MAC rule for logical port from HW struct
3302 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3305 return ICE_ERR_PARAM;
3307 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3308 hw->port_info->lport);
3312 * ice_add_vlan_internal - Add one VLAN based filter rule
3313 * @hw: pointer to the hardware structure
3314 * @recp_list: recipe list for which rule has to be added
3315 * @f_entry: filter entry containing one VLAN information
3317 static enum ice_status
3318 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3319 struct ice_fltr_list_entry *f_entry)
3321 struct ice_fltr_mgmt_list_entry *v_list_itr;
3322 struct ice_fltr_info *new_fltr, *cur_fltr;
3323 enum ice_sw_lkup_type lkup_type;
3324 u16 vsi_list_id = 0, vsi_handle;
3325 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3326 enum ice_status status = ICE_SUCCESS;
3328 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3329 return ICE_ERR_PARAM;
3331 f_entry->fltr_info.fwd_id.hw_vsi_id =
3332 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3333 new_fltr = &f_entry->fltr_info;
3335 /* VLAN ID should only be 12 bits */
3336 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3337 return ICE_ERR_PARAM;
3339 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3340 return ICE_ERR_PARAM;
3342 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3343 lkup_type = new_fltr->lkup_type;
3344 vsi_handle = new_fltr->vsi_handle;
3345 rule_lock = &recp_list->filt_rule_lock;
3346 ice_acquire_lock(rule_lock);
3347 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3349 struct ice_vsi_list_map_info *map_info = NULL;
3351 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3352 /* All VLAN pruning rules use a VSI list. Check if
3353 * there is already a VSI list containing VSI that we
3354 * want to add. If found, use the same vsi_list_id for
3355 * this new VLAN rule or else create a new list.
3357 map_info = ice_find_vsi_list_entry(recp_list,
3361 status = ice_create_vsi_list_rule(hw,
3369 /* Convert the action to forwarding to a VSI list. */
3370 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3371 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3374 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3376 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3379 status = ICE_ERR_DOES_NOT_EXIST;
3382 /* reuse VSI list for new rule and increment ref_cnt */
3384 v_list_itr->vsi_list_info = map_info;
3385 map_info->ref_cnt++;
3387 v_list_itr->vsi_list_info =
3388 ice_create_vsi_list_map(hw, &vsi_handle,
3392 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3393 /* Update existing VSI list to add new VSI ID only if it used
3396 cur_fltr = &v_list_itr->fltr_info;
3397 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3400 /* If VLAN rule exists and VSI list being used by this rule is
3401 * referenced by more than 1 VLAN rule. Then create a new VSI
3402 * list appending previous VSI with new VSI and update existing
3403 * VLAN rule to point to new VSI list ID
3405 struct ice_fltr_info tmp_fltr;
3406 u16 vsi_handle_arr[2];
3409 /* Current implementation only supports reusing VSI list with
3410 * one VSI count. We should never hit below condition
3412 if (v_list_itr->vsi_count > 1 &&
3413 v_list_itr->vsi_list_info->ref_cnt > 1) {
3414 ice_debug(hw, ICE_DBG_SW,
3415 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3416 status = ICE_ERR_CFG;
3421 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3424 /* A rule already exists with the new VSI being added */
3425 if (cur_handle == vsi_handle) {
3426 status = ICE_ERR_ALREADY_EXISTS;
3430 vsi_handle_arr[0] = cur_handle;
3431 vsi_handle_arr[1] = vsi_handle;
3432 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3433 &vsi_list_id, lkup_type);
3437 tmp_fltr = v_list_itr->fltr_info;
3438 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3439 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3440 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3441 /* Update the previous switch rule to a new VSI list which
3442 * includes current VSI that is requested
3444 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3448 /* before overriding VSI list map info. decrement ref_cnt of
3451 v_list_itr->vsi_list_info->ref_cnt--;
3453 /* now update to newly created list */
3454 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3455 v_list_itr->vsi_list_info =
3456 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3458 v_list_itr->vsi_count++;
3462 ice_release_lock(rule_lock);
3467 * ice_add_vlan_rule - Add VLAN based filter rule
3468 * @hw: pointer to the hardware structure
3469 * @v_list: list of VLAN entries and forwarding information
3470 * @sw: pointer to switch info struct for which function add rule
3472 static enum ice_status
3473 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3474 struct ice_switch_info *sw)
3476 struct ice_fltr_list_entry *v_list_itr;
3477 struct ice_sw_recipe *recp_list;
3479 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3480 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3482 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3483 return ICE_ERR_PARAM;
3484 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3485 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3487 if (v_list_itr->status)
3488 return v_list_itr->status;
3494 * ice_add_vlan - Add a VLAN based filter rule
3495 * @hw: pointer to the hardware structure
3496 * @v_list: list of VLAN and forwarding information
3498 * Function add VLAN rule for logical port from HW struct
3501 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3504 return ICE_ERR_PARAM;
3506 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3510 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3511 * @hw: pointer to the hardware structure
3512 * @mv_list: list of MAC and VLAN filters
3513 * @sw: pointer to switch info struct for which function add rule
3514 * @lport: logic port number on which function add rule
3516 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3517 * pruning bits enabled, then it is the responsibility of the caller to make
3518 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3519 * VLAN won't be received on that VSI otherwise.
3521 static enum ice_status
3522 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
3523 struct ice_switch_info *sw, u8 lport)
3525 struct ice_fltr_list_entry *mv_list_itr;
3526 struct ice_sw_recipe *recp_list;
3528 if (!mv_list || !hw)
3529 return ICE_ERR_PARAM;
3531 recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
3532 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3534 enum ice_sw_lkup_type l_type =
3535 mv_list_itr->fltr_info.lkup_type;
3537 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3538 return ICE_ERR_PARAM;
3539 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3540 mv_list_itr->status =
3541 ice_add_rule_internal(hw, recp_list, lport,
3543 if (mv_list_itr->status)
3544 return mv_list_itr->status;
3550 * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
3551 * @hw: pointer to the hardware structure
3552 * @mv_list: list of MAC VLAN addresses and forwarding information
3554 * Function add MAC VLAN rule for logical port from HW struct
3557 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3559 if (!mv_list || !hw)
3560 return ICE_ERR_PARAM;
3562 return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
3563 hw->port_info->lport);
3567 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3568 * @hw: pointer to the hardware structure
3569 * @em_list: list of ether type MAC filter, MAC is optional
3570 * @sw: pointer to switch info struct for which function add rule
3571 * @lport: logic port number on which function add rule
3573 * This function requires the caller to populate the entries in
3574 * the filter list with the necessary fields (including flags to
3575 * indicate Tx or Rx rules).
3577 static enum ice_status
3578 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3579 struct ice_switch_info *sw, u8 lport)
3581 struct ice_fltr_list_entry *em_list_itr;
3583 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3585 struct ice_sw_recipe *recp_list;
3586 enum ice_sw_lkup_type l_type;
3588 l_type = em_list_itr->fltr_info.lkup_type;
3589 recp_list = &sw->recp_list[l_type];
3591 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3592 l_type != ICE_SW_LKUP_ETHERTYPE)
3593 return ICE_ERR_PARAM;
3595 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3598 if (em_list_itr->status)
3599 return em_list_itr->status;
3606 * ice_add_eth_mac - Add a ethertype based filter rule
3607 * @hw: pointer to the hardware structure
3608 * @em_list: list of ethertype and forwarding information
3610 * Function add ethertype rule for logical port from HW struct
3612 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3614 if (!em_list || !hw)
3615 return ICE_ERR_PARAM;
3617 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3618 hw->port_info->lport);
3622 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3623 * @hw: pointer to the hardware structure
3624 * @em_list: list of ethertype or ethertype MAC entries
3625 * @sw: pointer to switch info struct for which function add rule
3627 static enum ice_status
3628 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3629 struct ice_switch_info *sw)
3631 struct ice_fltr_list_entry *em_list_itr, *tmp;
3633 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3635 struct ice_sw_recipe *recp_list;
3636 enum ice_sw_lkup_type l_type;
3638 l_type = em_list_itr->fltr_info.lkup_type;
3640 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3641 l_type != ICE_SW_LKUP_ETHERTYPE)
3642 return ICE_ERR_PARAM;
3644 recp_list = &sw->recp_list[l_type];
3645 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3647 if (em_list_itr->status)
3648 return em_list_itr->status;
3654 * ice_remove_eth_mac - remove a ethertype based filter rule
3655 * @hw: pointer to the hardware structure
3656 * @em_list: list of ethertype and forwarding information
3660 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3662 if (!em_list || !hw)
3663 return ICE_ERR_PARAM;
3665 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3669 * ice_rem_sw_rule_info
3670 * @hw: pointer to the hardware structure
3671 * @rule_head: pointer to the switch list structure that we want to delete
3674 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3676 if (!LIST_EMPTY(rule_head)) {
3677 struct ice_fltr_mgmt_list_entry *entry;
3678 struct ice_fltr_mgmt_list_entry *tmp;
3680 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3681 ice_fltr_mgmt_list_entry, list_entry) {
3682 LIST_DEL(&entry->list_entry);
3683 ice_free(hw, entry);
3689 * ice_rem_adv_rule_info
3690 * @hw: pointer to the hardware structure
3691 * @rule_head: pointer to the switch list structure that we want to delete
3694 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3696 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3697 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3699 if (LIST_EMPTY(rule_head))
3702 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3703 ice_adv_fltr_mgmt_list_entry, list_entry) {
3704 LIST_DEL(&lst_itr->list_entry);
3705 ice_free(hw, lst_itr->lkups);
3706 ice_free(hw, lst_itr);
3711 * ice_rem_all_sw_rules_info
3712 * @hw: pointer to the hardware structure
3714 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3716 struct ice_switch_info *sw = hw->switch_info;
3719 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3720 struct LIST_HEAD_TYPE *rule_head;
3722 rule_head = &sw->recp_list[i].filt_rules;
3723 if (!sw->recp_list[i].adv_rule)
3724 ice_rem_sw_rule_info(hw, rule_head);
3726 ice_rem_adv_rule_info(hw, rule_head);
3731 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3732 * @pi: pointer to the port_info structure
3733 * @vsi_handle: VSI handle to set as default
3734 * @set: true to add the above mentioned switch rule, false to remove it
3735 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3737 * add filter rule to set/unset given VSI as default VSI for the switch
3738 * (represented by swid)
3741 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3744 struct ice_aqc_sw_rules_elem *s_rule;
3745 struct ice_fltr_info f_info;
3746 struct ice_hw *hw = pi->hw;
3747 enum ice_adminq_opc opcode;
3748 enum ice_status status;
3752 if (!ice_is_vsi_valid(hw, vsi_handle))
3753 return ICE_ERR_PARAM;
3754 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3756 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3757 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3758 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3760 return ICE_ERR_NO_MEMORY;
3762 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3764 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3765 f_info.flag = direction;
3766 f_info.fltr_act = ICE_FWD_TO_VSI;
3767 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3769 if (f_info.flag & ICE_FLTR_RX) {
3770 f_info.src = pi->lport;
3771 f_info.src_id = ICE_SRC_ID_LPORT;
3773 f_info.fltr_rule_id =
3774 pi->dflt_rx_vsi_rule_id;
3775 } else if (f_info.flag & ICE_FLTR_TX) {
3776 f_info.src_id = ICE_SRC_ID_VSI;
3777 f_info.src = hw_vsi_id;
3779 f_info.fltr_rule_id =
3780 pi->dflt_tx_vsi_rule_id;
3784 opcode = ice_aqc_opc_add_sw_rules;
3786 opcode = ice_aqc_opc_remove_sw_rules;
3788 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3790 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3791 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3794 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3796 if (f_info.flag & ICE_FLTR_TX) {
3797 pi->dflt_tx_vsi_num = hw_vsi_id;
3798 pi->dflt_tx_vsi_rule_id = index;
3799 } else if (f_info.flag & ICE_FLTR_RX) {
3800 pi->dflt_rx_vsi_num = hw_vsi_id;
3801 pi->dflt_rx_vsi_rule_id = index;
3804 if (f_info.flag & ICE_FLTR_TX) {
3805 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3806 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3807 } else if (f_info.flag & ICE_FLTR_RX) {
3808 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3809 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3814 ice_free(hw, s_rule);
3819 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3820 * @list_head: head of rule list
3821 * @f_info: rule information
3823 * Helper function to search for a unicast rule entry - this is to be used
3824 * to remove unicast MAC filter that is not shared with other VSIs on the
3827 * Returns pointer to entry storing the rule if found
3829 static struct ice_fltr_mgmt_list_entry *
3830 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3831 struct ice_fltr_info *f_info)
3833 struct ice_fltr_mgmt_list_entry *list_itr;
3835 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3837 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3838 sizeof(f_info->l_data)) &&
3839 f_info->fwd_id.hw_vsi_id ==
3840 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3841 f_info->flag == list_itr->fltr_info.flag)
3848 * ice_remove_mac_rule - remove a MAC based filter rule
3849 * @hw: pointer to the hardware structure
3850 * @m_list: list of MAC addresses and forwarding information
3851 * @recp_list: list from which function remove MAC address
3853 * This function removes either a MAC filter rule or a specific VSI from a
3854 * VSI list for a multicast MAC address.
3856 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3857 * ice_add_mac. Caller should be aware that this call will only work if all
3858 * the entries passed into m_list were added previously. It will not attempt to
3859 * do a partial remove of entries that were found.
3861 static enum ice_status
3862 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3863 struct ice_sw_recipe *recp_list)
3865 struct ice_fltr_list_entry *list_itr, *tmp;
3866 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3869 return ICE_ERR_PARAM;
3871 rule_lock = &recp_list->filt_rule_lock;
3872 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3874 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3875 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3878 if (l_type != ICE_SW_LKUP_MAC)
3879 return ICE_ERR_PARAM;
3881 vsi_handle = list_itr->fltr_info.vsi_handle;
3882 if (!ice_is_vsi_valid(hw, vsi_handle))
3883 return ICE_ERR_PARAM;
3885 list_itr->fltr_info.fwd_id.hw_vsi_id =
3886 ice_get_hw_vsi_num(hw, vsi_handle);
3887 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3888 /* Don't remove the unicast address that belongs to
3889 * another VSI on the switch, since it is not being
3892 ice_acquire_lock(rule_lock);
3893 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3894 &list_itr->fltr_info)) {
3895 ice_release_lock(rule_lock);
3896 return ICE_ERR_DOES_NOT_EXIST;
3898 ice_release_lock(rule_lock);
3900 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3902 if (list_itr->status)
3903 return list_itr->status;
3909 * ice_remove_mac - remove a MAC address based filter rule
3910 * @hw: pointer to the hardware structure
3911 * @m_list: list of MAC addresses and forwarding information
3915 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3917 struct ice_sw_recipe *recp_list;
3919 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3920 return ice_remove_mac_rule(hw, m_list, recp_list);
3924 * ice_remove_vlan_rule - Remove VLAN based filter rule
3925 * @hw: pointer to the hardware structure
3926 * @v_list: list of VLAN entries and forwarding information
3927 * @recp_list: list from which function remove VLAN
3929 static enum ice_status
3930 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3931 struct ice_sw_recipe *recp_list)
3933 struct ice_fltr_list_entry *v_list_itr, *tmp;
3935 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3937 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3939 if (l_type != ICE_SW_LKUP_VLAN)
3940 return ICE_ERR_PARAM;
3941 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3943 if (v_list_itr->status)
3944 return v_list_itr->status;
3950 * ice_remove_vlan - remove a VLAN address based filter rule
3951 * @hw: pointer to the hardware structure
3952 * @v_list: list of VLAN and forwarding information
3956 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3958 struct ice_sw_recipe *recp_list;
3961 return ICE_ERR_PARAM;
3963 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3964 return ice_remove_vlan_rule(hw, v_list, recp_list);
3968 * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
3969 * @hw: pointer to the hardware structure
3970 * @v_list: list of MAC VLAN entries and forwarding information
3971 * @recp_list: list from which function remove MAC VLAN
3973 static enum ice_status
3974 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3975 struct ice_sw_recipe *recp_list)
3977 struct ice_fltr_list_entry *v_list_itr, *tmp;
3979 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3980 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3982 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3984 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3985 return ICE_ERR_PARAM;
3986 v_list_itr->status =
3987 ice_remove_rule_internal(hw, recp_list,
3989 if (v_list_itr->status)
3990 return v_list_itr->status;
3996 * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
3997 * @hw: pointer to the hardware structure
3998 * @mv_list: list of MAC VLAN and forwarding information
4001 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4003 struct ice_sw_recipe *recp_list;
4005 if (!mv_list || !hw)
4006 return ICE_ERR_PARAM;
4008 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4009 return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4013 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4014 * @fm_entry: filter entry to inspect
4015 * @vsi_handle: VSI handle to compare with filter info
4018 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4020 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4021 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4022 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4023 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4028 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4029 * @hw: pointer to the hardware structure
4030 * @vsi_handle: VSI handle to remove filters from
4031 * @vsi_list_head: pointer to the list to add entry to
4032 * @fi: pointer to fltr_info of filter entry to copy & add
4034 * Helper function, used when creating a list of filters to remove from
4035 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4036 * original filter entry, with the exception of fltr_info.fltr_act and
4037 * fltr_info.fwd_id fields. These are set such that later logic can
4038 * extract which VSI to remove the fltr from, and pass on that information.
4040 static enum ice_status
4041 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4042 struct LIST_HEAD_TYPE *vsi_list_head,
4043 struct ice_fltr_info *fi)
4045 struct ice_fltr_list_entry *tmp;
4047 /* this memory is freed up in the caller function
4048 * once filters for this VSI are removed
4050 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4052 return ICE_ERR_NO_MEMORY;
4054 tmp->fltr_info = *fi;
4056 /* Overwrite these fields to indicate which VSI to remove filter from,
4057 * so find and remove logic can extract the information from the
4058 * list entries. Note that original entries will still have proper
4061 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4062 tmp->fltr_info.vsi_handle = vsi_handle;
4063 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4065 LIST_ADD(&tmp->list_entry, vsi_list_head);
4071 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4072 * @hw: pointer to the hardware structure
4073 * @vsi_handle: VSI handle to remove filters from
4074 * @lkup_list_head: pointer to the list that has certain lookup type filters
4075 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4077 * Locates all filters in lkup_list_head that are used by the given VSI,
4078 * and adds COPIES of those entries to vsi_list_head (intended to be used
4079 * to remove the listed filters).
4080 * Note that this means all entries in vsi_list_head must be explicitly
4081 * deallocated by the caller when done with list.
4083 static enum ice_status
4084 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4085 struct LIST_HEAD_TYPE *lkup_list_head,
4086 struct LIST_HEAD_TYPE *vsi_list_head)
4088 struct ice_fltr_mgmt_list_entry *fm_entry;
4089 enum ice_status status = ICE_SUCCESS;
4091 /* check to make sure VSI ID is valid and within boundary */
4092 if (!ice_is_vsi_valid(hw, vsi_handle))
4093 return ICE_ERR_PARAM;
4095 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4096 ice_fltr_mgmt_list_entry, list_entry) {
4097 struct ice_fltr_info *fi;
4099 fi = &fm_entry->fltr_info;
4100 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4103 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4112 * ice_determine_promisc_mask
4113 * @fi: filter info to parse
4115 * Helper function to determine which ICE_PROMISC_ mask corresponds
4116 * to given filter into.
4118 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4120 u16 vid = fi->l_data.mac_vlan.vlan_id;
4121 u8 *macaddr = fi->l_data.mac.mac_addr;
4122 bool is_tx_fltr = false;
4123 u8 promisc_mask = 0;
4125 if (fi->flag == ICE_FLTR_TX)
4128 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4129 promisc_mask |= is_tx_fltr ?
4130 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4131 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4132 promisc_mask |= is_tx_fltr ?
4133 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4134 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4135 promisc_mask |= is_tx_fltr ?
4136 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4138 promisc_mask |= is_tx_fltr ?
4139 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4141 return promisc_mask;
4145 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4146 * @hw: pointer to the hardware structure
4147 * @vsi_handle: VSI handle to retrieve info from
4148 * @promisc_mask: pointer to mask to be filled in
4149 * @vid: VLAN ID of promisc VLAN VSI
4152 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4155 struct ice_switch_info *sw = hw->switch_info;
4156 struct ice_fltr_mgmt_list_entry *itr;
4157 struct LIST_HEAD_TYPE *rule_head;
4158 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4160 if (!ice_is_vsi_valid(hw, vsi_handle))
4161 return ICE_ERR_PARAM;
4165 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4166 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4168 ice_acquire_lock(rule_lock);
4169 LIST_FOR_EACH_ENTRY(itr, rule_head,
4170 ice_fltr_mgmt_list_entry, list_entry) {
4171 /* Continue if this filter doesn't apply to this VSI or the
4172 * VSI ID is not in the VSI map for this filter
4174 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4177 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4179 ice_release_lock(rule_lock);
4185 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4186 * @hw: pointer to the hardware structure
4187 * @vsi_handle: VSI handle to retrieve info from
4188 * @promisc_mask: pointer to mask to be filled in
4189 * @vid: VLAN ID of promisc VLAN VSI
4192 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4195 struct ice_switch_info *sw = hw->switch_info;
4196 struct ice_fltr_mgmt_list_entry *itr;
4197 struct LIST_HEAD_TYPE *rule_head;
4198 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4200 if (!ice_is_vsi_valid(hw, vsi_handle))
4201 return ICE_ERR_PARAM;
4205 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4206 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4208 ice_acquire_lock(rule_lock);
4209 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4211 /* Continue if this filter doesn't apply to this VSI or the
4212 * VSI ID is not in the VSI map for this filter
4214 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4217 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4219 ice_release_lock(rule_lock);
4225 * ice_remove_promisc - Remove promisc based filter rules
4226 * @hw: pointer to the hardware structure
4227 * @recp_id: recipe ID for which the rule needs to removed
4228 * @v_list: list of promisc entries
4230 static enum ice_status
4231 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4232 struct LIST_HEAD_TYPE *v_list)
4234 struct ice_fltr_list_entry *v_list_itr, *tmp;
4235 struct ice_sw_recipe *recp_list;
4237 recp_list = &hw->switch_info->recp_list[recp_id];
4238 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4240 v_list_itr->status =
4241 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4242 if (v_list_itr->status)
4243 return v_list_itr->status;
4249 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4250 * @hw: pointer to the hardware structure
4251 * @vsi_handle: VSI handle to clear mode
4252 * @promisc_mask: mask of promiscuous config bits to clear
4253 * @vid: VLAN ID to clear VLAN promiscuous
4256 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4259 struct ice_switch_info *sw = hw->switch_info;
4260 struct ice_fltr_list_entry *fm_entry, *tmp;
4261 struct LIST_HEAD_TYPE remove_list_head;
4262 struct ice_fltr_mgmt_list_entry *itr;
4263 struct LIST_HEAD_TYPE *rule_head;
4264 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4265 enum ice_status status = ICE_SUCCESS;
4268 if (!ice_is_vsi_valid(hw, vsi_handle))
4269 return ICE_ERR_PARAM;
4271 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4272 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4274 recipe_id = ICE_SW_LKUP_PROMISC;
4276 rule_head = &sw->recp_list[recipe_id].filt_rules;
4277 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4279 INIT_LIST_HEAD(&remove_list_head);
4281 ice_acquire_lock(rule_lock);
4282 LIST_FOR_EACH_ENTRY(itr, rule_head,
4283 ice_fltr_mgmt_list_entry, list_entry) {
4284 struct ice_fltr_info *fltr_info;
4285 u8 fltr_promisc_mask = 0;
4287 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4289 fltr_info = &itr->fltr_info;
4291 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4292 vid != fltr_info->l_data.mac_vlan.vlan_id)
4295 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4297 /* Skip if filter is not completely specified by given mask */
4298 if (fltr_promisc_mask & ~promisc_mask)
4301 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4305 ice_release_lock(rule_lock);
4306 goto free_fltr_list;
4309 ice_release_lock(rule_lock);
4311 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4314 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4315 ice_fltr_list_entry, list_entry) {
4316 LIST_DEL(&fm_entry->list_entry);
4317 ice_free(hw, fm_entry);
4324 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4325 * @hw: pointer to the hardware structure
4326 * @vsi_handle: VSI handle to configure
4327 * @promisc_mask: mask of promiscuous config bits
4328 * @vid: VLAN ID to set VLAN promiscuous
4331 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4333 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4334 struct ice_fltr_list_entry f_list_entry;
4335 struct ice_fltr_info new_fltr;
4336 enum ice_status status = ICE_SUCCESS;
4342 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4344 if (!ice_is_vsi_valid(hw, vsi_handle))
4345 return ICE_ERR_PARAM;
4346 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4348 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4350 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4351 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4352 new_fltr.l_data.mac_vlan.vlan_id = vid;
4353 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4355 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4356 recipe_id = ICE_SW_LKUP_PROMISC;
4359 /* Separate filters must be set for each direction/packet type
4360 * combination, so we will loop over the mask value, store the
4361 * individual type, and clear it out in the input mask as it
4364 while (promisc_mask) {
4365 struct ice_sw_recipe *recp_list;
4371 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4372 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4373 pkt_type = UCAST_FLTR;
4374 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4375 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4376 pkt_type = UCAST_FLTR;
4378 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4379 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4380 pkt_type = MCAST_FLTR;
4381 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4382 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4383 pkt_type = MCAST_FLTR;
4385 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4386 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4387 pkt_type = BCAST_FLTR;
4388 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4389 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4390 pkt_type = BCAST_FLTR;
4394 /* Check for VLAN promiscuous flag */
4395 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4396 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4397 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4398 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4402 /* Set filter DA based on packet type */
4403 mac_addr = new_fltr.l_data.mac.mac_addr;
4404 if (pkt_type == BCAST_FLTR) {
4405 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4406 } else if (pkt_type == MCAST_FLTR ||
4407 pkt_type == UCAST_FLTR) {
4408 /* Use the dummy ether header DA */
4409 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4410 ICE_NONDMA_TO_NONDMA);
4411 if (pkt_type == MCAST_FLTR)
4412 mac_addr[0] |= 0x1; /* Set multicast bit */
4415 /* Need to reset this to zero for all iterations */
4418 new_fltr.flag |= ICE_FLTR_TX;
4419 new_fltr.src = hw_vsi_id;
4421 new_fltr.flag |= ICE_FLTR_RX;
4422 new_fltr.src = hw->port_info->lport;
4425 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4426 new_fltr.vsi_handle = vsi_handle;
4427 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4428 f_list_entry.fltr_info = new_fltr;
4429 recp_list = &hw->switch_info->recp_list[recipe_id];
4431 status = ice_add_rule_internal(hw, recp_list,
4432 hw->port_info->lport,
4434 if (status != ICE_SUCCESS)
4435 goto set_promisc_exit;
4443 * ice_set_vlan_vsi_promisc
4444 * @hw: pointer to the hardware structure
4445 * @vsi_handle: VSI handle to configure
4446 * @promisc_mask: mask of promiscuous config bits
4447 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4449 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4452 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4453 bool rm_vlan_promisc)
4455 struct ice_switch_info *sw = hw->switch_info;
4456 struct ice_fltr_list_entry *list_itr, *tmp;
4457 struct LIST_HEAD_TYPE vsi_list_head;
4458 struct LIST_HEAD_TYPE *vlan_head;
4459 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4460 enum ice_status status;
4463 INIT_LIST_HEAD(&vsi_list_head);
4464 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4465 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4466 ice_acquire_lock(vlan_lock);
4467 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4469 ice_release_lock(vlan_lock);
4471 goto free_fltr_list;
4473 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4475 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4476 if (rm_vlan_promisc)
4477 status = ice_clear_vsi_promisc(hw, vsi_handle,
4478 promisc_mask, vlan_id);
4480 status = ice_set_vsi_promisc(hw, vsi_handle,
4481 promisc_mask, vlan_id);
4487 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4488 ice_fltr_list_entry, list_entry) {
4489 LIST_DEL(&list_itr->list_entry);
4490 ice_free(hw, list_itr);
4496 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4497 * @hw: pointer to the hardware structure
4498 * @vsi_handle: VSI handle to remove filters from
4499 * @recp_list: recipe list from which function remove fltr
4500 * @lkup: switch rule filter lookup type
4503 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4504 struct ice_sw_recipe *recp_list,
4505 enum ice_sw_lkup_type lkup)
4507 struct ice_fltr_list_entry *fm_entry;
4508 struct LIST_HEAD_TYPE remove_list_head;
4509 struct LIST_HEAD_TYPE *rule_head;
4510 struct ice_fltr_list_entry *tmp;
4511 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4512 enum ice_status status;
4514 INIT_LIST_HEAD(&remove_list_head);
4515 rule_lock = &recp_list[lkup].filt_rule_lock;
4516 rule_head = &recp_list[lkup].filt_rules;
4517 ice_acquire_lock(rule_lock);
4518 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4520 ice_release_lock(rule_lock);
4525 case ICE_SW_LKUP_MAC:
4526 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4528 case ICE_SW_LKUP_VLAN:
4529 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4531 case ICE_SW_LKUP_PROMISC:
4532 case ICE_SW_LKUP_PROMISC_VLAN:
4533 ice_remove_promisc(hw, lkup, &remove_list_head);
4535 case ICE_SW_LKUP_MAC_VLAN:
4536 ice_remove_mac_vlan(hw, &remove_list_head);
4538 case ICE_SW_LKUP_ETHERTYPE:
4539 case ICE_SW_LKUP_ETHERTYPE_MAC:
4540 ice_remove_eth_mac(hw, &remove_list_head);
4542 case ICE_SW_LKUP_DFLT:
4543 ice_debug(hw, ICE_DBG_SW,
4544 "Remove filters for this lookup type hasn't been implemented yet\n");
4546 case ICE_SW_LKUP_LAST:
4547 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4551 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4552 ice_fltr_list_entry, list_entry) {
4553 LIST_DEL(&fm_entry->list_entry);
4554 ice_free(hw, fm_entry);
4559 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4560 * @hw: pointer to the hardware structure
4561 * @vsi_handle: VSI handle to remove filters from
4562 * @sw: pointer to switch info struct
4565 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4566 struct ice_switch_info *sw)
4568 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4570 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4571 sw->recp_list, ICE_SW_LKUP_MAC);
4572 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4573 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4574 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4575 sw->recp_list, ICE_SW_LKUP_PROMISC);
4576 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4577 sw->recp_list, ICE_SW_LKUP_VLAN);
4578 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4579 sw->recp_list, ICE_SW_LKUP_DFLT);
4580 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4581 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4582 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4583 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4584 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4585 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4589 * ice_remove_vsi_fltr - Remove all filters for a VSI
4590 * @hw: pointer to the hardware structure
4591 * @vsi_handle: VSI handle to remove filters from
4593 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4595 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4599 * ice_alloc_res_cntr - allocating resource counter
4600 * @hw: pointer to the hardware structure
4601 * @type: type of resource
4602 * @alloc_shared: if set it is shared else dedicated
4603 * @num_items: number of entries requested for FD resource type
4604 * @counter_id: counter index returned by AQ call
4607 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4610 struct ice_aqc_alloc_free_res_elem *buf;
4611 enum ice_status status;
4614 /* Allocate resource */
4615 buf_len = sizeof(*buf);
4616 buf = (struct ice_aqc_alloc_free_res_elem *)
4617 ice_malloc(hw, buf_len);
4619 return ICE_ERR_NO_MEMORY;
4621 buf->num_elems = CPU_TO_LE16(num_items);
4622 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4623 ICE_AQC_RES_TYPE_M) | alloc_shared);
4625 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4626 ice_aqc_opc_alloc_res, NULL);
4630 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4638 * ice_free_res_cntr - free resource counter
4639 * @hw: pointer to the hardware structure
4640 * @type: type of resource
4641 * @alloc_shared: if set it is shared else dedicated
4642 * @num_items: number of entries to be freed for FD resource type
4643 * @counter_id: counter ID resource which needs to be freed
4646 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4649 struct ice_aqc_alloc_free_res_elem *buf;
4650 enum ice_status status;
4654 buf_len = sizeof(*buf);
4655 buf = (struct ice_aqc_alloc_free_res_elem *)
4656 ice_malloc(hw, buf_len);
4658 return ICE_ERR_NO_MEMORY;
4660 buf->num_elems = CPU_TO_LE16(num_items);
4661 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4662 ICE_AQC_RES_TYPE_M) | alloc_shared);
4663 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4665 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4666 ice_aqc_opc_free_res, NULL);
4668 ice_debug(hw, ICE_DBG_SW,
4669 "counter resource could not be freed\n");
4676 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4677 * @hw: pointer to the hardware structure
4678 * @counter_id: returns counter index
4680 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4682 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4683 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4688 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4689 * @hw: pointer to the hardware structure
4690 * @counter_id: counter index to be freed
4692 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4694 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4695 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4700 * ice_alloc_res_lg_act - add large action resource
4701 * @hw: pointer to the hardware structure
4702 * @l_id: large action ID to fill it in
4703 * @num_acts: number of actions to hold with a large action entry
4705 static enum ice_status
4706 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4708 struct ice_aqc_alloc_free_res_elem *sw_buf;
4709 enum ice_status status;
4712 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4713 return ICE_ERR_PARAM;
4715 /* Allocate resource for large action */
4716 buf_len = sizeof(*sw_buf);
4717 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4718 ice_malloc(hw, buf_len);
4720 return ICE_ERR_NO_MEMORY;
4722 sw_buf->num_elems = CPU_TO_LE16(1);
4724 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4725 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4726 * If num_acts is greater than 2, then use
4727 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4728 * The num_acts cannot exceed 4. This was ensured at the
4729 * beginning of the function.
4732 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4733 else if (num_acts == 2)
4734 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4736 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4738 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4739 ice_aqc_opc_alloc_res, NULL);
4741 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4743 ice_free(hw, sw_buf);
4748 * ice_add_mac_with_sw_marker - add filter with sw marker
4749 * @hw: pointer to the hardware structure
4750 * @f_info: filter info structure containing the MAC filter information
4751 * @sw_marker: sw marker to tag the Rx descriptor with
4754 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4757 struct ice_fltr_mgmt_list_entry *m_entry;
4758 struct ice_fltr_list_entry fl_info;
4759 struct ice_sw_recipe *recp_list;
4760 struct LIST_HEAD_TYPE l_head;
4761 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4762 enum ice_status ret;
4766 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4767 return ICE_ERR_PARAM;
4769 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4770 return ICE_ERR_PARAM;
4772 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4773 return ICE_ERR_PARAM;
4775 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4776 return ICE_ERR_PARAM;
4777 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4779 /* Add filter if it doesn't exist so then the adding of large
4780 * action always results in update
4783 INIT_LIST_HEAD(&l_head);
4784 fl_info.fltr_info = *f_info;
4785 LIST_ADD(&fl_info.list_entry, &l_head);
4787 entry_exists = false;
4788 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4789 hw->port_info->lport);
4790 if (ret == ICE_ERR_ALREADY_EXISTS)
4791 entry_exists = true;
4795 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4796 rule_lock = &recp_list->filt_rule_lock;
4797 ice_acquire_lock(rule_lock);
4798 /* Get the book keeping entry for the filter */
4799 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4803 /* If counter action was enabled for this rule then don't enable
4804 * sw marker large action
4806 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4807 ret = ICE_ERR_PARAM;
4811 /* if same marker was added before */
4812 if (m_entry->sw_marker_id == sw_marker) {
4813 ret = ICE_ERR_ALREADY_EXISTS;
4817 /* Allocate a hardware table entry to hold large act. Three actions
4818 * for marker based large action
4820 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4824 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4827 /* Update the switch rule to add the marker action */
4828 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4830 ice_release_lock(rule_lock);
4835 ice_release_lock(rule_lock);
4836 /* only remove entry if it did not exist previously */
4838 ret = ice_remove_mac(hw, &l_head);
4844 * ice_add_mac_with_counter - add filter with counter enabled
4845 * @hw: pointer to the hardware structure
4846 * @f_info: pointer to filter info structure containing the MAC filter
4850 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4852 struct ice_fltr_mgmt_list_entry *m_entry;
4853 struct ice_fltr_list_entry fl_info;
4854 struct ice_sw_recipe *recp_list;
4855 struct LIST_HEAD_TYPE l_head;
4856 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4857 enum ice_status ret;
4862 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4863 return ICE_ERR_PARAM;
4865 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4866 return ICE_ERR_PARAM;
4868 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4869 return ICE_ERR_PARAM;
4870 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4871 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4873 entry_exist = false;
4875 rule_lock = &recp_list->filt_rule_lock;
4877 /* Add filter if it doesn't exist so then the adding of large
4878 * action always results in update
4880 INIT_LIST_HEAD(&l_head);
4882 fl_info.fltr_info = *f_info;
4883 LIST_ADD(&fl_info.list_entry, &l_head);
4885 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4886 hw->port_info->lport);
4887 if (ret == ICE_ERR_ALREADY_EXISTS)
4892 ice_acquire_lock(rule_lock);
4893 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4895 ret = ICE_ERR_BAD_PTR;
4899 /* Don't enable counter for a filter for which sw marker was enabled */
4900 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4901 ret = ICE_ERR_PARAM;
4905 /* If a counter was already enabled then don't need to add again */
4906 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4907 ret = ICE_ERR_ALREADY_EXISTS;
4911 /* Allocate a hardware table entry to VLAN counter */
4912 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4916 /* Allocate a hardware table entry to hold large act. Two actions for
4917 * counter based large action
4919 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4923 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4926 /* Update the switch rule to add the counter action */
4927 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4929 ice_release_lock(rule_lock);
4934 ice_release_lock(rule_lock);
4935 /* only remove entry if it did not exist previously */
4937 ret = ice_remove_mac(hw, &l_head);
4942 /* This is mapping table entry that maps every word within a given protocol
4943 * structure to the real byte offset as per the specification of that
4945 * for example dst address is 3 words in ethertype header and corresponding
4946 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4947 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4948 * matching entry describing its field. This needs to be updated if new
4949 * structure is added to that union.
4951 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4952 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4953 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4954 { ICE_ETYPE_OL, { 0 } },
4955 { ICE_VLAN_OFOS, { 0, 2 } },
4956 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4957 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4958 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4959 26, 28, 30, 32, 34, 36, 38 } },
4960 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4961 26, 28, 30, 32, 34, 36, 38 } },
4962 { ICE_TCP_IL, { 0, 2 } },
4963 { ICE_UDP_OF, { 0, 2 } },
4964 { ICE_UDP_ILOS, { 0, 2 } },
4965 { ICE_SCTP_IL, { 0, 2 } },
4966 { ICE_VXLAN, { 8, 10, 12, 14 } },
4967 { ICE_GENEVE, { 8, 10, 12, 14 } },
4968 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4969 { ICE_NVGRE, { 0, 2, 4, 6 } },
4970 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4971 { ICE_PPPOE, { 0, 2, 4, 6 } },
4974 /* The following table describes preferred grouping of recipes.
4975 * If a recipe that needs to be programmed is a superset or matches one of the
4976 * following combinations, then the recipe needs to be chained as per the
4980 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4981 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4982 { ICE_MAC_IL, ICE_MAC_IL_HW },
4983 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4984 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4985 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4986 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4987 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4988 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4989 { ICE_TCP_IL, ICE_TCP_IL_HW },
4990 { ICE_UDP_OF, ICE_UDP_OF_HW },
4991 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4992 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4993 { ICE_VXLAN, ICE_UDP_OF_HW },
4994 { ICE_GENEVE, ICE_UDP_OF_HW },
4995 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4996 { ICE_NVGRE, ICE_GRE_OF_HW },
4997 { ICE_GTP, ICE_UDP_OF_HW },
4998 { ICE_PPPOE, ICE_PPPOE_HW },
5002 * ice_find_recp - find a recipe
5003 * @hw: pointer to the hardware structure
5004 * @lkup_exts: extension sequence to match
5006 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5008 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
5010 bool refresh_required = true;
5011 struct ice_sw_recipe *recp;
5014 /* Walk through existing recipes to find a match */
5015 recp = hw->switch_info->recp_list;
5016 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5017 /* If recipe was not created for this ID, in SW bookkeeping,
5018 * check if FW has an entry for this recipe. If the FW has an
5019 * entry update it in our SW bookkeeping and continue with the
5022 if (!recp[i].recp_created)
5023 if (ice_get_recp_frm_fw(hw,
5024 hw->switch_info->recp_list, i,
5028 /* Skip inverse action recipes */
5029 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5030 ICE_AQ_RECIPE_ACT_INV_ACT)
5033 /* if number of words we are looking for match */
5034 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5035 struct ice_fv_word *a = lkup_exts->fv_words;
5036 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
5040 for (p = 0; p < lkup_exts->n_val_words; p++) {
5041 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5043 if (a[p].off == b[q].off &&
5044 a[p].prot_id == b[q].prot_id)
5045 /* Found the "p"th word in the
5050 /* After walking through all the words in the
5051 * "i"th recipe if "p"th word was not found then
5052 * this recipe is not what we are looking for.
5053 * So break out from this loop and try the next
5056 if (q >= recp[i].lkup_exts.n_val_words) {
5061 /* If for "i"th recipe the found was never set to false
5062 * then it means we found our match
5065 return i; /* Return the recipe ID */
5068 return ICE_MAX_NUM_RECIPES;
5072 * ice_prot_type_to_id - get protocol ID from protocol type
5073 * @type: protocol type
5074 * @id: pointer to variable that will receive the ID
5076 * Returns true if found, false otherwise
5078 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5082 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5083 if (ice_prot_id_tbl[i].type == type) {
5084 *id = ice_prot_id_tbl[i].protocol_id;
5091 * ice_find_valid_words - count valid words
5092 * @rule: advanced rule with lookup information
5093 * @lkup_exts: byte offset extractions of the words that are valid
5095 * calculate valid words in a lookup rule using mask value
5098 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5099 struct ice_prot_lkup_ext *lkup_exts)
5101 u8 j, word, prot_id, ret_val;
5103 if (!ice_prot_type_to_id(rule->type, &prot_id))
5106 word = lkup_exts->n_val_words;
5108 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5109 if (((u16 *)&rule->m_u)[j] &&
5110 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5111 /* No more space to accommodate */
5112 if (word >= ICE_MAX_CHAIN_WORDS)
5114 lkup_exts->fv_words[word].off =
5115 ice_prot_ext[rule->type].offs[j];
5116 lkup_exts->fv_words[word].prot_id =
5117 ice_prot_id_tbl[rule->type].protocol_id;
5118 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
5122 ret_val = word - lkup_exts->n_val_words;
5123 lkup_exts->n_val_words = word;
5129 * ice_create_first_fit_recp_def - Create a recipe grouping
5130 * @hw: pointer to the hardware structure
5131 * @lkup_exts: an array of protocol header extractions
5132 * @rg_list: pointer to a list that stores new recipe groups
5133 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5135 * Using first fit algorithm, take all the words that are still not done
5136 * and start grouping them in 4-word groups. Each group makes up one
5139 static enum ice_status
5140 ice_create_first_fit_recp_def(struct ice_hw *hw,
5141 struct ice_prot_lkup_ext *lkup_exts,
5142 struct LIST_HEAD_TYPE *rg_list,
5145 struct ice_pref_recipe_group *grp = NULL;
5150 if (!lkup_exts->n_val_words) {
5151 struct ice_recp_grp_entry *entry;
5153 entry = (struct ice_recp_grp_entry *)
5154 ice_malloc(hw, sizeof(*entry));
5156 return ICE_ERR_NO_MEMORY;
5157 LIST_ADD(&entry->l_entry, rg_list);
5158 grp = &entry->r_group;
5160 grp->n_val_pairs = 0;
5163 /* Walk through every word in the rule to check if it is not done. If so
5164 * then this word needs to be part of a new recipe.
5166 for (j = 0; j < lkup_exts->n_val_words; j++)
5167 if (!ice_is_bit_set(lkup_exts->done, j)) {
5169 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5170 struct ice_recp_grp_entry *entry;
5172 entry = (struct ice_recp_grp_entry *)
5173 ice_malloc(hw, sizeof(*entry));
5175 return ICE_ERR_NO_MEMORY;
5176 LIST_ADD(&entry->l_entry, rg_list);
5177 grp = &entry->r_group;
5181 grp->pairs[grp->n_val_pairs].prot_id =
5182 lkup_exts->fv_words[j].prot_id;
5183 grp->pairs[grp->n_val_pairs].off =
5184 lkup_exts->fv_words[j].off;
5185 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5193 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5194 * @hw: pointer to the hardware structure
5195 * @fv_list: field vector with the extraction sequence information
5196 * @rg_list: recipe groupings with protocol-offset pairs
5198 * Helper function to fill in the field vector indices for protocol-offset
5199 * pairs. These indexes are then ultimately programmed into a recipe.
5201 static enum ice_status
5202 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5203 struct LIST_HEAD_TYPE *rg_list)
5205 struct ice_sw_fv_list_entry *fv;
5206 struct ice_recp_grp_entry *rg;
5207 struct ice_fv_word *fv_ext;
5209 if (LIST_EMPTY(fv_list))
5212 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5213 fv_ext = fv->fv_ptr->ew;
5215 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5218 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5219 struct ice_fv_word *pr;
5224 pr = &rg->r_group.pairs[i];
5225 mask = rg->r_group.mask[i];
5227 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5228 if (fv_ext[j].prot_id == pr->prot_id &&
5229 fv_ext[j].off == pr->off) {
5232 /* Store index of field vector */
5234 /* Mask is given by caller as big
5235 * endian, but sent to FW as little
5238 rg->fv_mask[i] = mask << 8 | mask >> 8;
5242 /* Protocol/offset could not be found, caller gave an
5246 return ICE_ERR_PARAM;
5254 * ice_find_free_recp_res_idx - find free result indexes for recipe
5255 * @hw: pointer to hardware structure
5256 * @profiles: bitmap of profiles that will be associated with the new recipe
5257 * @free_idx: pointer to variable to receive the free index bitmap
5259 * The algorithm used here is:
5260 * 1. When creating a new recipe, create a set P which contains all
5261 * Profiles that will be associated with our new recipe
5263 * 2. For each Profile p in set P:
5264 * a. Add all recipes associated with Profile p into set R
5265 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5266 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5267 * i. Or just assume they all have the same possible indexes:
5269 * i.e., PossibleIndexes = 0x0000F00000000000
5271 * 3. For each Recipe r in set R:
5272 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5273 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5275 * FreeIndexes will contain the bits indicating the indexes free for use,
5276 * then the code needs to update the recipe[r].used_result_idx_bits to
5277 * indicate which indexes were selected for use by this recipe.
5280 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5281 ice_bitmap_t *free_idx)
5283 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5284 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5285 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5289 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5290 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5291 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5292 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5294 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5295 ice_set_bit(count, possible_idx);
5297 /* For each profile we are going to associate the recipe with, add the
5298 * recipes that are associated with that profile. This will give us
5299 * the set of recipes that our recipe may collide with. Also, determine
5300 * what possible result indexes are usable given this set of profiles.
5303 while (ICE_MAX_NUM_PROFILES >
5304 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5305 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5306 ICE_MAX_NUM_RECIPES);
5307 ice_and_bitmap(possible_idx, possible_idx,
5308 hw->switch_info->prof_res_bm[bit],
5313 /* For each recipe that our new recipe may collide with, determine
5314 * which indexes have been used.
5316 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5317 if (ice_is_bit_set(recipes, bit)) {
5318 ice_or_bitmap(used_idx, used_idx,
5319 hw->switch_info->recp_list[bit].res_idxs,
5323 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5325 /* return number of free indexes */
5328 while (ICE_MAX_FV_WORDS >
5329 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5338 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5339 * @hw: pointer to hardware structure
5340 * @rm: recipe management list entry
5341 * @match_tun: if field vector index for tunnel needs to be programmed
5342 * @profiles: bitmap of profiles that will be assocated.
5344 static enum ice_status
5345 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5346 bool match_tun, ice_bitmap_t *profiles)
5348 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5349 struct ice_aqc_recipe_data_elem *tmp;
5350 struct ice_aqc_recipe_data_elem *buf;
5351 struct ice_recp_grp_entry *entry;
5352 enum ice_status status;
5358 /* When more than one recipe are required, another recipe is needed to
5359 * chain them together. Matching a tunnel metadata ID takes up one of
5360 * the match fields in the chaining recipe reducing the number of
5361 * chained recipes by one.
5363 /* check number of free result indices */
5364 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5365 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5367 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5368 free_res_idx, rm->n_grp_count);
5370 if (rm->n_grp_count > 1) {
5371 if (rm->n_grp_count > free_res_idx)
5372 return ICE_ERR_MAX_LIMIT;
5377 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5378 ICE_MAX_NUM_RECIPES,
5381 return ICE_ERR_NO_MEMORY;
5383 buf = (struct ice_aqc_recipe_data_elem *)
5384 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5386 status = ICE_ERR_NO_MEMORY;
5390 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5391 recipe_count = ICE_MAX_NUM_RECIPES;
5392 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5394 if (status || recipe_count == 0)
5397 /* Allocate the recipe resources, and configure them according to the
5398 * match fields from protocol headers and extracted field vectors.
5400 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5401 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5404 status = ice_alloc_recipe(hw, &entry->rid);
5408 /* Clear the result index of the located recipe, as this will be
5409 * updated, if needed, later in the recipe creation process.
5411 tmp[0].content.result_indx = 0;
5413 buf[recps] = tmp[0];
5414 buf[recps].recipe_indx = (u8)entry->rid;
5415 /* if the recipe is a non-root recipe RID should be programmed
5416 * as 0 for the rules to be applied correctly.
5418 buf[recps].content.rid = 0;
5419 ice_memset(&buf[recps].content.lkup_indx, 0,
5420 sizeof(buf[recps].content.lkup_indx),
5423 /* All recipes use look-up index 0 to match switch ID. */
5424 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5425 buf[recps].content.mask[0] =
5426 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5427 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5430 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5431 buf[recps].content.lkup_indx[i] = 0x80;
5432 buf[recps].content.mask[i] = 0;
5435 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5436 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5437 buf[recps].content.mask[i + 1] =
5438 CPU_TO_LE16(entry->fv_mask[i]);
5441 if (rm->n_grp_count > 1) {
5442 /* Checks to see if there really is a valid result index
5445 if (chain_idx >= ICE_MAX_FV_WORDS) {
5446 ice_debug(hw, ICE_DBG_SW,
5447 "No chain index available\n");
5448 status = ICE_ERR_MAX_LIMIT;
5452 entry->chain_idx = chain_idx;
5453 buf[recps].content.result_indx =
5454 ICE_AQ_RECIPE_RESULT_EN |
5455 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5456 ICE_AQ_RECIPE_RESULT_DATA_M);
5457 ice_clear_bit(chain_idx, result_idx_bm);
5458 chain_idx = ice_find_first_bit(result_idx_bm,
5462 /* fill recipe dependencies */
5463 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5464 ICE_MAX_NUM_RECIPES);
5465 ice_set_bit(buf[recps].recipe_indx,
5466 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5467 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5471 if (rm->n_grp_count == 1) {
5472 rm->root_rid = buf[0].recipe_indx;
5473 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5474 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5475 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5476 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5477 sizeof(buf[0].recipe_bitmap),
5478 ICE_NONDMA_TO_NONDMA);
5480 status = ICE_ERR_BAD_PTR;
5483 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5484 * the recipe which is getting created if specified
5485 * by user. Usually any advanced switch filter, which results
5486 * into new extraction sequence, ended up creating a new recipe
5487 * of type ROOT and usually recipes are associated with profiles
5488 * Switch rule referreing newly created recipe, needs to have
5489 * either/or 'fwd' or 'join' priority, otherwise switch rule
5490 * evaluation will not happen correctly. In other words, if
5491 * switch rule to be evaluated on priority basis, then recipe
5492 * needs to have priority, otherwise it will be evaluated last.
5494 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5496 struct ice_recp_grp_entry *last_chain_entry;
5499 /* Allocate the last recipe that will chain the outcomes of the
5500 * other recipes together
5502 status = ice_alloc_recipe(hw, &rid);
5506 buf[recps].recipe_indx = (u8)rid;
5507 buf[recps].content.rid = (u8)rid;
5508 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5509 /* the new entry created should also be part of rg_list to
5510 * make sure we have complete recipe
5512 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5513 sizeof(*last_chain_entry));
5514 if (!last_chain_entry) {
5515 status = ICE_ERR_NO_MEMORY;
5518 last_chain_entry->rid = rid;
5519 ice_memset(&buf[recps].content.lkup_indx, 0,
5520 sizeof(buf[recps].content.lkup_indx),
5522 /* All recipes use look-up index 0 to match switch ID. */
5523 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5524 buf[recps].content.mask[0] =
5525 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5526 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5527 buf[recps].content.lkup_indx[i] =
5528 ICE_AQ_RECIPE_LKUP_IGNORE;
5529 buf[recps].content.mask[i] = 0;
5533 /* update r_bitmap with the recp that is used for chaining */
5534 ice_set_bit(rid, rm->r_bitmap);
5535 /* this is the recipe that chains all the other recipes so it
5536 * should not have a chaining ID to indicate the same
5538 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5539 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5541 last_chain_entry->fv_idx[i] = entry->chain_idx;
5542 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5543 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5544 ice_set_bit(entry->rid, rm->r_bitmap);
5546 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5547 if (sizeof(buf[recps].recipe_bitmap) >=
5548 sizeof(rm->r_bitmap)) {
5549 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5550 sizeof(buf[recps].recipe_bitmap),
5551 ICE_NONDMA_TO_NONDMA);
5553 status = ICE_ERR_BAD_PTR;
5556 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5558 /* To differentiate among different UDP tunnels, a meta data ID
5562 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5563 buf[recps].content.mask[i] =
5564 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5568 rm->root_rid = (u8)rid;
5570 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5574 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5575 ice_release_change_lock(hw);
5579 /* Every recipe that just got created add it to the recipe
5582 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5583 struct ice_switch_info *sw = hw->switch_info;
5584 bool is_root, idx_found = false;
5585 struct ice_sw_recipe *recp;
5586 u16 idx, buf_idx = 0;
5588 /* find buffer index for copying some data */
5589 for (idx = 0; idx < rm->n_grp_count; idx++)
5590 if (buf[idx].recipe_indx == entry->rid) {
5596 status = ICE_ERR_OUT_OF_RANGE;
5600 recp = &sw->recp_list[entry->rid];
5601 is_root = (rm->root_rid == entry->rid);
5602 recp->is_root = is_root;
5604 recp->root_rid = entry->rid;
5605 recp->big_recp = (is_root && rm->n_grp_count > 1);
5607 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5608 entry->r_group.n_val_pairs *
5609 sizeof(struct ice_fv_word),
5610 ICE_NONDMA_TO_NONDMA);
5612 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5613 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5615 /* Copy non-result fv index values and masks to recipe. This
5616 * call will also update the result recipe bitmask.
5618 ice_collect_result_idx(&buf[buf_idx], recp);
5620 /* for non-root recipes, also copy to the root, this allows
5621 * easier matching of a complete chained recipe
5624 ice_collect_result_idx(&buf[buf_idx],
5625 &sw->recp_list[rm->root_rid]);
5627 recp->n_ext_words = entry->r_group.n_val_pairs;
5628 recp->chain_idx = entry->chain_idx;
5629 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5630 recp->n_grp_count = rm->n_grp_count;
5631 recp->tun_type = rm->tun_type;
5632 recp->recp_created = true;
5647 * ice_create_recipe_group - creates recipe group
5648 * @hw: pointer to hardware structure
5649 * @rm: recipe management list entry
5650 * @lkup_exts: lookup elements
5652 static enum ice_status
5653 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5654 struct ice_prot_lkup_ext *lkup_exts)
5656 enum ice_status status;
5659 rm->n_grp_count = 0;
5661 /* Create recipes for words that are marked not done by packing them
5664 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5665 &rm->rg_list, &recp_count);
5667 rm->n_grp_count += recp_count;
5668 rm->n_ext_words = lkup_exts->n_val_words;
5669 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5670 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5671 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5672 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5679 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5680 * @hw: pointer to hardware structure
5681 * @lkups: lookup elements or match criteria for the advanced recipe, one
5682 * structure per protocol header
5683 * @lkups_cnt: number of protocols
5684 * @bm: bitmap of field vectors to consider
5685 * @fv_list: pointer to a list that holds the returned field vectors
5687 static enum ice_status
5688 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5689 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5691 enum ice_status status;
5698 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5700 return ICE_ERR_NO_MEMORY;
5702 for (i = 0; i < lkups_cnt; i++)
5703 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5704 status = ICE_ERR_CFG;
5708 /* Find field vectors that include all specified protocol types */
5709 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5712 ice_free(hw, prot_ids);
5717 * ice_add_special_words - Add words that are not protocols, such as metadata
5718 * @rinfo: other information regarding the rule e.g. priority and action info
5719 * @lkup_exts: lookup word structure
5721 static enum ice_status
5722 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5723 struct ice_prot_lkup_ext *lkup_exts)
5725 /* If this is a tunneled packet, then add recipe index to match the
5726 * tunnel bit in the packet metadata flags.
5728 if (rinfo->tun_type != ICE_NON_TUN) {
5729 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5730 u8 word = lkup_exts->n_val_words++;
5732 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5733 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5735 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5737 return ICE_ERR_MAX_LIMIT;
5744 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5745 * @hw: pointer to hardware structure
5746 * @rinfo: other information regarding the rule e.g. priority and action info
5747 * @bm: pointer to memory for returning the bitmap of field vectors
5750 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5753 enum ice_prof_type prof_type;
5755 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
5757 switch (rinfo->tun_type) {
5759 prof_type = ICE_PROF_NON_TUN;
5761 case ICE_ALL_TUNNELS:
5762 prof_type = ICE_PROF_TUN_ALL;
5764 case ICE_SW_TUN_VXLAN_GPE:
5765 case ICE_SW_TUN_GENEVE:
5766 case ICE_SW_TUN_VXLAN:
5767 case ICE_SW_TUN_UDP:
5768 case ICE_SW_TUN_GTP:
5769 prof_type = ICE_PROF_TUN_UDP;
5771 case ICE_SW_TUN_NVGRE:
5772 prof_type = ICE_PROF_TUN_GRE;
5774 case ICE_SW_TUN_PPPOE:
5775 prof_type = ICE_PROF_TUN_PPPOE;
5777 case ICE_SW_TUN_PROFID_IPV6_ESP:
5778 ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
5780 case ICE_SW_TUN_PROFID_IPV6_AH:
5781 ice_set_bit(ICE_PROFID_IPV6_AH, bm);
5783 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5784 ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
5786 case ICE_SW_TUN_AND_NON_TUN:
5788 prof_type = ICE_PROF_ALL;
5792 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5796 * ice_is_prof_rule - determine if rule type is a profile rule
5797 * @type: the rule type
5799 * if the rule type is a profile rule, that means that there no field value
5800 * match required, in this case just a profile hit is required.
5802 static bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
5805 case ICE_SW_TUN_PROFID_IPV6_ESP:
5806 case ICE_SW_TUN_PROFID_IPV6_AH:
5807 case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
5817 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5818 * @hw: pointer to hardware structure
5819 * @lkups: lookup elements or match criteria for the advanced recipe, one
5820 * structure per protocol header
5821 * @lkups_cnt: number of protocols
5822 * @rinfo: other information regarding the rule e.g. priority and action info
5823 * @rid: return the recipe ID of the recipe created
5825 static enum ice_status
5826 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5827 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5829 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5830 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5831 struct ice_prot_lkup_ext *lkup_exts;
5832 struct ice_recp_grp_entry *r_entry;
5833 struct ice_sw_fv_list_entry *fvit;
5834 struct ice_recp_grp_entry *r_tmp;
5835 struct ice_sw_fv_list_entry *tmp;
5836 enum ice_status status = ICE_SUCCESS;
5837 struct ice_sw_recipe *rm;
5838 bool match_tun = false;
5841 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
5842 return ICE_ERR_PARAM;
5844 lkup_exts = (struct ice_prot_lkup_ext *)
5845 ice_malloc(hw, sizeof(*lkup_exts));
5847 return ICE_ERR_NO_MEMORY;
5849 /* Determine the number of words to be matched and if it exceeds a
5850 * recipe's restrictions
5852 for (i = 0; i < lkups_cnt; i++) {
5855 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5856 status = ICE_ERR_CFG;
5857 goto err_free_lkup_exts;
5860 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5862 status = ICE_ERR_CFG;
5863 goto err_free_lkup_exts;
5867 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5869 status = ICE_ERR_NO_MEMORY;
5870 goto err_free_lkup_exts;
5873 /* Get field vectors that contain fields extracted from all the protocol
5874 * headers being programmed.
5876 INIT_LIST_HEAD(&rm->fv_list);
5877 INIT_LIST_HEAD(&rm->rg_list);
5879 /* Get bitmap of field vectors (profiles) that are compatible with the
5880 * rule request; only these will be searched in the subsequent call to
5883 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5885 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5889 /* Group match words into recipes using preferred recipe grouping
5892 status = ice_create_recipe_group(hw, rm, lkup_exts);
5896 /* There is only profile for UDP tunnels. So, it is necessary to use a
5897 * metadata ID flag to differentiate different tunnel types. A separate
5898 * recipe needs to be used for the metadata.
5900 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5901 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5902 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5905 /* set the recipe priority if specified */
5906 rm->priority = (u8)rinfo->priority;
5908 /* Find offsets from the field vector. Pick the first one for all the
5911 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5915 /* An empty FV list means to use all the profiles returned in the
5918 if (LIST_EMPTY(&rm->fv_list)) {
5921 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
5922 if (ice_is_bit_set(fv_bitmap, j)) {
5923 struct ice_sw_fv_list_entry *fvl;
5925 fvl = (struct ice_sw_fv_list_entry *)
5926 ice_malloc(hw, sizeof(*fvl));
5930 fvl->profile_id = j;
5931 LIST_ADD(&fvl->list_entry, &rm->fv_list);
5935 /* get bitmap of all profiles the recipe will be associated with */
5936 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5937 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5939 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5940 ice_set_bit((u16)fvit->profile_id, profiles);
5943 /* Create any special protocol/offset pairs, such as looking at tunnel
5944 * bits by extracting metadata
5946 status = ice_add_special_words(rinfo, lkup_exts);
5948 goto err_free_lkup_exts;
5950 /* Look for a recipe which matches our requested fv / mask list */
5951 *rid = ice_find_recp(hw, lkup_exts);
5952 if (*rid < ICE_MAX_NUM_RECIPES)
5953 /* Success if found a recipe that match the existing criteria */
5956 /* Recipe we need does not exist, add a recipe */
5957 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5961 /* Associate all the recipes created with all the profiles in the
5962 * common field vector.
5964 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5966 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5969 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5970 (u8 *)r_bitmap, NULL);
5974 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5975 ICE_MAX_NUM_RECIPES);
5976 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5980 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5983 ice_release_change_lock(hw);
5988 /* Update profile to recipe bitmap array */
5989 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5990 ICE_MAX_NUM_RECIPES);
5992 /* Update recipe to profile bitmap array */
5993 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5994 if (ice_is_bit_set(r_bitmap, j))
5995 ice_set_bit((u16)fvit->profile_id,
5996 recipe_to_profile[j]);
5999 *rid = rm->root_rid;
6000 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6001 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6003 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6004 ice_recp_grp_entry, l_entry) {
6005 LIST_DEL(&r_entry->l_entry);
6006 ice_free(hw, r_entry);
6009 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6011 LIST_DEL(&fvit->list_entry);
6016 ice_free(hw, rm->root_buf);
6021 ice_free(hw, lkup_exts);
6027 * ice_find_dummy_packet - find dummy packet by tunnel type
6029 * @lkups: lookup elements or match criteria for the advanced recipe, one
6030 * structure per protocol header
6031 * @lkups_cnt: number of protocols
6032 * @tun_type: tunnel type from the match criteria
6033 * @pkt: dummy packet to fill according to filter match criteria
6034 * @pkt_len: packet length of dummy packet
6035 * @offsets: pointer to receive the pointer to the offsets for the packet
6038 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6039 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
6041 const struct ice_dummy_pkt_offsets **offsets)
6043 bool tcp = false, udp = false, ipv6 = false, vlan = false;
6047 for (i = 0; i < lkups_cnt; i++) {
6048 if (lkups[i].type == ICE_UDP_ILOS)
6050 else if (lkups[i].type == ICE_TCP_IL)
6052 else if (lkups[i].type == ICE_IPV6_OFOS)
6054 else if (lkups[i].type == ICE_VLAN_OFOS)
6056 else if (lkups[i].type == ICE_IPV4_OFOS &&
6057 lkups[i].h_u.ipv4_hdr.protocol ==
6058 ICE_IPV4_NVGRE_PROTO_ID &&
6059 lkups[i].m_u.ipv4_hdr.protocol ==
6062 else if (lkups[i].type == ICE_PPPOE &&
6063 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
6064 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
6065 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
6068 else if (lkups[i].type == ICE_ETYPE_OL &&
6069 lkups[i].h_u.ethertype.ethtype_id ==
6070 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
6071 lkups[i].m_u.ethertype.ethtype_id ==
6076 if (tun_type == ICE_SW_TUN_GTP) {
6077 *pkt = dummy_udp_gtp_packet;
6078 *pkt_len = sizeof(dummy_udp_gtp_packet);
6079 *offsets = dummy_udp_gtp_packet_offsets;
6082 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
6083 *pkt = dummy_pppoe_ipv6_packet;
6084 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
6085 *offsets = dummy_pppoe_packet_offsets;
6087 } else if (tun_type == ICE_SW_TUN_PPPOE) {
6088 *pkt = dummy_pppoe_ipv4_packet;
6089 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
6090 *offsets = dummy_pppoe_packet_offsets;
6094 if (tun_type == ICE_ALL_TUNNELS) {
6095 *pkt = dummy_gre_udp_packet;
6096 *pkt_len = sizeof(dummy_gre_udp_packet);
6097 *offsets = dummy_gre_udp_packet_offsets;
6101 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
6103 *pkt = dummy_gre_tcp_packet;
6104 *pkt_len = sizeof(dummy_gre_tcp_packet);
6105 *offsets = dummy_gre_tcp_packet_offsets;
6109 *pkt = dummy_gre_udp_packet;
6110 *pkt_len = sizeof(dummy_gre_udp_packet);
6111 *offsets = dummy_gre_udp_packet_offsets;
6115 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6116 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6118 *pkt = dummy_udp_tun_tcp_packet;
6119 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6120 *offsets = dummy_udp_tun_tcp_packet_offsets;
6124 *pkt = dummy_udp_tun_udp_packet;
6125 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6126 *offsets = dummy_udp_tun_udp_packet_offsets;
6132 *pkt = dummy_vlan_udp_packet;
6133 *pkt_len = sizeof(dummy_vlan_udp_packet);
6134 *offsets = dummy_vlan_udp_packet_offsets;
6137 *pkt = dummy_udp_packet;
6138 *pkt_len = sizeof(dummy_udp_packet);
6139 *offsets = dummy_udp_packet_offsets;
6141 } else if (udp && ipv6) {
6143 *pkt = dummy_vlan_udp_ipv6_packet;
6144 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6145 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6148 *pkt = dummy_udp_ipv6_packet;
6149 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6150 *offsets = dummy_udp_ipv6_packet_offsets;
6152 } else if ((tcp && ipv6) || ipv6) {
6154 *pkt = dummy_vlan_tcp_ipv6_packet;
6155 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6156 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6159 *pkt = dummy_tcp_ipv6_packet;
6160 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6161 *offsets = dummy_tcp_ipv6_packet_offsets;
6166 *pkt = dummy_vlan_tcp_packet;
6167 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6168 *offsets = dummy_vlan_tcp_packet_offsets;
6170 *pkt = dummy_tcp_packet;
6171 *pkt_len = sizeof(dummy_tcp_packet);
6172 *offsets = dummy_tcp_packet_offsets;
6177 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6179 * @lkups: lookup elements or match criteria for the advanced recipe, one
6180 * structure per protocol header
6181 * @lkups_cnt: number of protocols
6182 * @s_rule: stores rule information from the match criteria
6183 * @dummy_pkt: dummy packet to fill according to filter match criteria
6184 * @pkt_len: packet length of dummy packet
6185 * @offsets: offset info for the dummy packet
6187 static enum ice_status
6188 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6189 struct ice_aqc_sw_rules_elem *s_rule,
6190 const u8 *dummy_pkt, u16 pkt_len,
6191 const struct ice_dummy_pkt_offsets *offsets)
6196 /* Start with a packet with a pre-defined/dummy content. Then, fill
6197 * in the header values to be looked up or matched.
6199 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6201 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6203 for (i = 0; i < lkups_cnt; i++) {
6204 enum ice_protocol_type type;
6205 u16 offset = 0, len = 0, j;
6208 /* find the start of this layer; it should be found since this
6209 * was already checked when search for the dummy packet
6211 type = lkups[i].type;
6212 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6213 if (type == offsets[j].type) {
6214 offset = offsets[j].offset;
6219 /* this should never happen in a correct calling sequence */
6221 return ICE_ERR_PARAM;
6223 switch (lkups[i].type) {
6226 len = sizeof(struct ice_ether_hdr);
6229 len = sizeof(struct ice_ethtype_hdr);
6232 len = sizeof(struct ice_vlan_hdr);
6236 len = sizeof(struct ice_ipv4_hdr);
6240 len = sizeof(struct ice_ipv6_hdr);
6245 len = sizeof(struct ice_l4_hdr);
6248 len = sizeof(struct ice_sctp_hdr);
6251 len = sizeof(struct ice_nvgre);
6256 len = sizeof(struct ice_udp_tnl_hdr);
6260 len = sizeof(struct ice_udp_gtp_hdr);
6263 len = sizeof(struct ice_pppoe_hdr);
6266 return ICE_ERR_PARAM;
6269 /* the length should be a word multiple */
6270 if (len % ICE_BYTES_PER_WORD)
6273 /* We have the offset to the header start, the length, the
6274 * caller's header values and mask. Use this information to
6275 * copy the data into the dummy packet appropriately based on
6276 * the mask. Note that we need to only write the bits as
6277 * indicated by the mask to make sure we don't improperly write
6278 * over any significant packet data.
6280 for (j = 0; j < len / sizeof(u16); j++)
6281 if (((u16 *)&lkups[i].m_u)[j])
6282 ((u16 *)(pkt + offset))[j] =
6283 (((u16 *)(pkt + offset))[j] &
6284 ~((u16 *)&lkups[i].m_u)[j]) |
6285 (((u16 *)&lkups[i].h_u)[j] &
6286 ((u16 *)&lkups[i].m_u)[j]);
6289 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6295 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6296 * @hw: pointer to the hardware structure
6297 * @tun_type: tunnel type
6298 * @pkt: dummy packet to fill in
6299 * @offsets: offset info for the dummy packet
6301 static enum ice_status
6302 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6303 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6308 case ICE_SW_TUN_AND_NON_TUN:
6309 case ICE_SW_TUN_VXLAN_GPE:
6310 case ICE_SW_TUN_VXLAN:
6311 case ICE_SW_TUN_UDP:
6312 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6316 case ICE_SW_TUN_GENEVE:
6317 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6322 /* Nothing needs to be done for this tunnel type */
6326 /* Find the outer UDP protocol header and insert the port number */
6327 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6328 if (offsets[i].type == ICE_UDP_OF) {
6329 struct ice_l4_hdr *hdr;
6332 offset = offsets[i].offset;
6333 hdr = (struct ice_l4_hdr *)&pkt[offset];
6334 hdr->dst_port = CPU_TO_BE16(open_port);
6344 * ice_find_adv_rule_entry - Search a rule entry
6345 * @hw: pointer to the hardware structure
6346 * @lkups: lookup elements or match criteria for the advanced recipe, one
6347 * structure per protocol header
6348 * @lkups_cnt: number of protocols
6349 * @recp_id: recipe ID for which we are finding the rule
6350 * @rinfo: other information regarding the rule e.g. priority and action info
6352 * Helper function to search for a given advance rule entry
6353 * Returns pointer to entry storing the rule if found
6355 static struct ice_adv_fltr_mgmt_list_entry *
6356 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6357 u16 lkups_cnt, u16 recp_id,
6358 struct ice_adv_rule_info *rinfo)
6360 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6361 struct ice_switch_info *sw = hw->switch_info;
6364 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6365 ice_adv_fltr_mgmt_list_entry, list_entry) {
6366 bool lkups_matched = true;
6368 if (lkups_cnt != list_itr->lkups_cnt)
6370 for (i = 0; i < list_itr->lkups_cnt; i++)
6371 if (memcmp(&list_itr->lkups[i], &lkups[i],
6373 lkups_matched = false;
6376 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6377 rinfo->tun_type == list_itr->rule_info.tun_type &&
6385 * ice_adv_add_update_vsi_list
6386 * @hw: pointer to the hardware structure
6387 * @m_entry: pointer to current adv filter management list entry
6388 * @cur_fltr: filter information from the book keeping entry
6389 * @new_fltr: filter information with the new VSI to be added
6391 * Call AQ command to add or update previously created VSI list with new VSI.
6393 * Helper function to do book keeping associated with adding filter information
6394 * The algorithm to do the booking keeping is described below :
6395 * When a VSI needs to subscribe to a given advanced filter
6396 * if only one VSI has been added till now
6397 * Allocate a new VSI list and add two VSIs
6398 * to this list using switch rule command
6399 * Update the previously created switch rule with the
6400 * newly created VSI list ID
6401 * if a VSI list was previously created
6402 * Add the new VSI to the previously created VSI list set
6403 * using the update switch rule command
6405 static enum ice_status
6406 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6407 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6408 struct ice_adv_rule_info *cur_fltr,
6409 struct ice_adv_rule_info *new_fltr)
6411 enum ice_status status;
6412 u16 vsi_list_id = 0;
6414 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6415 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6416 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6417 return ICE_ERR_NOT_IMPL;
6419 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6420 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6421 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6422 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6423 return ICE_ERR_NOT_IMPL;
6425 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6426 /* Only one entry existed in the mapping and it was not already
6427 * a part of a VSI list. So, create a VSI list with the old and
6430 struct ice_fltr_info tmp_fltr;
6431 u16 vsi_handle_arr[2];
6433 /* A rule already exists with the new VSI being added */
6434 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6435 new_fltr->sw_act.fwd_id.hw_vsi_id)
6436 return ICE_ERR_ALREADY_EXISTS;
6438 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6439 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6440 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6446 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6447 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6448 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6449 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6450 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6452 /* Update the previous switch rule of "forward to VSI" to
6455 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6459 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6460 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6461 m_entry->vsi_list_info =
6462 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6465 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6467 if (!m_entry->vsi_list_info)
6470 /* A rule already exists with the new VSI being added */
6471 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6474 /* Update the previously created VSI list set with
6475 * the new VSI ID passed in
6477 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6479 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6481 ice_aqc_opc_update_sw_rules,
6483 /* update VSI list mapping info with new VSI ID */
6485 ice_set_bit(vsi_handle,
6486 m_entry->vsi_list_info->vsi_map);
6489 m_entry->vsi_count++;
6494 * ice_add_adv_rule - helper function to create an advanced switch rule
6495 * @hw: pointer to the hardware structure
6496 * @lkups: information on the words that needs to be looked up. All words
6497 * together makes one recipe
6498 * @lkups_cnt: num of entries in the lkups array
6499 * @rinfo: other information related to the rule that needs to be programmed
6500 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6501 * ignored is case of error.
6503 * This function can program only 1 rule at a time. The lkups is used to
6504 * describe the all the words that forms the "lookup" portion of the recipe.
6505 * These words can span multiple protocols. Callers to this function need to
6506 * pass in a list of protocol headers with lookup information along and mask
6507 * that determines which words are valid from the given protocol header.
6508 * rinfo describes other information related to this rule such as forwarding
6509 * IDs, priority of this rule, etc.
6512 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6513 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6514 struct ice_rule_query_data *added_entry)
6516 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6517 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6518 const struct ice_dummy_pkt_offsets *pkt_offsets;
6519 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6520 struct LIST_HEAD_TYPE *rule_head;
6521 struct ice_switch_info *sw;
6522 enum ice_status status;
6523 const u8 *pkt = NULL;
6529 /* Initialize profile to result index bitmap */
6530 if (!hw->switch_info->prof_res_bm_init) {
6531 hw->switch_info->prof_res_bm_init = 1;
6532 ice_init_prof_result_bm(hw);
6535 prof_rule = ice_is_prof_rule(rinfo->tun_type);
6536 if (!prof_rule && !lkups_cnt)
6537 return ICE_ERR_PARAM;
6539 /* get # of words we need to match */
6541 for (i = 0; i < lkups_cnt; i++) {
6544 ptr = (u16 *)&lkups[i].m_u;
6545 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6551 if (word_cnt > ICE_MAX_CHAIN_WORDS)
6552 return ICE_ERR_PARAM;
6554 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6555 return ICE_ERR_PARAM;
6558 /* make sure that we can locate a dummy packet */
6559 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6562 status = ICE_ERR_PARAM;
6563 goto err_ice_add_adv_rule;
6566 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6567 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6568 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6569 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6572 vsi_handle = rinfo->sw_act.vsi_handle;
6573 if (!ice_is_vsi_valid(hw, vsi_handle))
6574 return ICE_ERR_PARAM;
6576 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6577 rinfo->sw_act.fwd_id.hw_vsi_id =
6578 ice_get_hw_vsi_num(hw, vsi_handle);
6579 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6580 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6582 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6585 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6587 /* we have to add VSI to VSI_LIST and increment vsi_count.
6588 * Also Update VSI list so that we can change forwarding rule
6589 * if the rule already exists, we will check if it exists with
6590 * same vsi_id, if not then add it to the VSI list if it already
6591 * exists if not then create a VSI list and add the existing VSI
6592 * ID and the new VSI ID to the list
6593 * We will add that VSI to the list
6595 status = ice_adv_add_update_vsi_list(hw, m_entry,
6596 &m_entry->rule_info,
6599 added_entry->rid = rid;
6600 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6601 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6605 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6606 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6608 return ICE_ERR_NO_MEMORY;
6609 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6610 switch (rinfo->sw_act.fltr_act) {
6611 case ICE_FWD_TO_VSI:
6612 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6613 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6614 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6617 act |= ICE_SINGLE_ACT_TO_Q;
6618 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6619 ICE_SINGLE_ACT_Q_INDEX_M;
6621 case ICE_FWD_TO_QGRP:
6622 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6623 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6624 act |= ICE_SINGLE_ACT_TO_Q;
6625 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6626 ICE_SINGLE_ACT_Q_INDEX_M;
6627 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6628 ICE_SINGLE_ACT_Q_REGION_M;
6630 case ICE_DROP_PACKET:
6631 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6632 ICE_SINGLE_ACT_VALID_BIT;
6635 status = ICE_ERR_CFG;
6636 goto err_ice_add_adv_rule;
6639 /* set the rule LOOKUP type based on caller specified 'RX'
6640 * instead of hardcoding it to be either LOOKUP_TX/RX
6642 * for 'RX' set the source to be the port number
6643 * for 'TX' set the source to be the source HW VSI number (determined
6647 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6648 s_rule->pdata.lkup_tx_rx.src =
6649 CPU_TO_LE16(hw->port_info->lport);
6651 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6652 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6655 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6656 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6658 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6659 pkt_len, pkt_offsets);
6661 goto err_ice_add_adv_rule;
6663 if (rinfo->tun_type != ICE_NON_TUN &&
6664 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6665 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6666 s_rule->pdata.lkup_tx_rx.hdr,
6669 goto err_ice_add_adv_rule;
6672 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6673 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6676 goto err_ice_add_adv_rule;
6677 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6678 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6680 status = ICE_ERR_NO_MEMORY;
6681 goto err_ice_add_adv_rule;
6684 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6685 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6686 ICE_NONDMA_TO_NONDMA);
6687 if (!adv_fltr->lkups && !prof_rule) {
6688 status = ICE_ERR_NO_MEMORY;
6689 goto err_ice_add_adv_rule;
6692 adv_fltr->lkups_cnt = lkups_cnt;
6693 adv_fltr->rule_info = *rinfo;
6694 adv_fltr->rule_info.fltr_rule_id =
6695 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6696 sw = hw->switch_info;
6697 sw->recp_list[rid].adv_rule = true;
6698 rule_head = &sw->recp_list[rid].filt_rules;
6700 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6701 struct ice_fltr_info tmp_fltr;
6703 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6704 tmp_fltr.fltr_rule_id =
6705 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6706 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6707 tmp_fltr.fwd_id.hw_vsi_id =
6708 ice_get_hw_vsi_num(hw, vsi_handle);
6709 tmp_fltr.vsi_handle = vsi_handle;
6710 /* Update the previous switch rule of "forward to VSI" to
6713 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6715 goto err_ice_add_adv_rule;
6716 adv_fltr->vsi_count = 1;
6719 /* Add rule entry to book keeping list */
6720 LIST_ADD(&adv_fltr->list_entry, rule_head);
6722 added_entry->rid = rid;
6723 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6724 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6726 err_ice_add_adv_rule:
6727 if (status && adv_fltr) {
6728 ice_free(hw, adv_fltr->lkups);
6729 ice_free(hw, adv_fltr);
6732 ice_free(hw, s_rule);
6738 * ice_adv_rem_update_vsi_list
6739 * @hw: pointer to the hardware structure
6740 * @vsi_handle: VSI handle of the VSI to remove
6741 * @fm_list: filter management entry for which the VSI list management needs to
6744 static enum ice_status
6745 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6746 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6748 struct ice_vsi_list_map_info *vsi_list_info;
6749 enum ice_sw_lkup_type lkup_type;
6750 enum ice_status status;
6753 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6754 fm_list->vsi_count == 0)
6755 return ICE_ERR_PARAM;
6757 /* A rule with the VSI being removed does not exist */
6758 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6759 return ICE_ERR_DOES_NOT_EXIST;
6761 lkup_type = ICE_SW_LKUP_LAST;
6762 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6763 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6764 ice_aqc_opc_update_sw_rules,
6769 fm_list->vsi_count--;
6770 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6771 vsi_list_info = fm_list->vsi_list_info;
6772 if (fm_list->vsi_count == 1) {
6773 struct ice_fltr_info tmp_fltr;
6776 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6778 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6779 return ICE_ERR_OUT_OF_RANGE;
6781 /* Make sure VSI list is empty before removing it below */
6782 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6784 ice_aqc_opc_update_sw_rules,
6789 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6790 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6791 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6792 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6793 tmp_fltr.fwd_id.hw_vsi_id =
6794 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6795 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6796 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6798 /* Update the previous switch rule of "MAC forward to VSI" to
6799 * "MAC fwd to VSI list"
6801 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6803 ice_debug(hw, ICE_DBG_SW,
6804 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6805 tmp_fltr.fwd_id.hw_vsi_id, status);
6809 /* Remove the VSI list since it is no longer used */
6810 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6812 ice_debug(hw, ICE_DBG_SW,
6813 "Failed to remove VSI list %d, error %d\n",
6814 vsi_list_id, status);
6818 LIST_DEL(&vsi_list_info->list_entry);
6819 ice_free(hw, vsi_list_info);
6820 fm_list->vsi_list_info = NULL;
6827 * ice_rem_adv_rule - removes existing advanced switch rule
6828 * @hw: pointer to the hardware structure
6829 * @lkups: information on the words that needs to be looked up. All words
6830 * together makes one recipe
6831 * @lkups_cnt: num of entries in the lkups array
6832 * @rinfo: Its the pointer to the rule information for the rule
6834 * This function can be used to remove 1 rule at a time. The lkups is
6835 * used to describe all the words that forms the "lookup" portion of the
6836 * rule. These words can span multiple protocols. Callers to this function
6837 * need to pass in a list of protocol headers with lookup information along
6838 * and mask that determines which words are valid from the given protocol
6839 * header. rinfo describes other information related to this rule such as
6840 * forwarding IDs, priority of this rule, etc.
6843 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6844 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6846 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6847 struct ice_prot_lkup_ext lkup_exts;
6848 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6849 enum ice_status status = ICE_SUCCESS;
6850 bool remove_rule = false;
6851 u16 i, rid, vsi_handle;
6853 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6854 for (i = 0; i < lkups_cnt; i++) {
6857 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6860 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6865 /* Create any special protocol/offset pairs, such as looking at tunnel
6866 * bits by extracting metadata
6868 status = ice_add_special_words(rinfo, &lkup_exts);
6872 rid = ice_find_recp(hw, &lkup_exts);
6873 /* If did not find a recipe that match the existing criteria */
6874 if (rid == ICE_MAX_NUM_RECIPES)
6875 return ICE_ERR_PARAM;
6877 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6878 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6879 /* the rule is already removed */
6882 ice_acquire_lock(rule_lock);
6883 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6885 } else if (list_elem->vsi_count > 1) {
6886 list_elem->vsi_list_info->ref_cnt--;
6887 remove_rule = false;
6888 vsi_handle = rinfo->sw_act.vsi_handle;
6889 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6891 vsi_handle = rinfo->sw_act.vsi_handle;
6892 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6894 ice_release_lock(rule_lock);
6897 if (list_elem->vsi_count == 0)
6900 ice_release_lock(rule_lock);
6902 struct ice_aqc_sw_rules_elem *s_rule;
6905 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6907 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6910 return ICE_ERR_NO_MEMORY;
6911 s_rule->pdata.lkup_tx_rx.act = 0;
6912 s_rule->pdata.lkup_tx_rx.index =
6913 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6914 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6915 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6917 ice_aqc_opc_remove_sw_rules, NULL);
6918 if (status == ICE_SUCCESS) {
6919 ice_acquire_lock(rule_lock);
6920 LIST_DEL(&list_elem->list_entry);
6921 ice_free(hw, list_elem->lkups);
6922 ice_free(hw, list_elem);
6923 ice_release_lock(rule_lock);
6925 ice_free(hw, s_rule);
6931 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6932 * @hw: pointer to the hardware structure
6933 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6935 * This function is used to remove 1 rule at a time. The removal is based on
6936 * the remove_entry parameter. This function will remove rule for a given
6937 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6940 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6941 struct ice_rule_query_data *remove_entry)
6943 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6944 struct LIST_HEAD_TYPE *list_head;
6945 struct ice_adv_rule_info rinfo;
6946 struct ice_switch_info *sw;
6948 sw = hw->switch_info;
6949 if (!sw->recp_list[remove_entry->rid].recp_created)
6950 return ICE_ERR_PARAM;
6951 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6952 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6954 if (list_itr->rule_info.fltr_rule_id ==
6955 remove_entry->rule_id) {
6956 rinfo = list_itr->rule_info;
6957 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6958 return ice_rem_adv_rule(hw, list_itr->lkups,
6959 list_itr->lkups_cnt, &rinfo);
6962 return ICE_ERR_PARAM;
6966 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6968 * @hw: pointer to the hardware structure
6969 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6971 * This function is used to remove all the rules for a given VSI and as soon
6972 * as removing a rule fails, it will return immediately with the error code,
6973 * else it will return ICE_SUCCESS
6976 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6978 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6979 struct ice_vsi_list_map_info *map_info;
6980 struct LIST_HEAD_TYPE *list_head;
6981 struct ice_adv_rule_info rinfo;
6982 struct ice_switch_info *sw;
6983 enum ice_status status;
6984 u16 vsi_list_id = 0;
6987 sw = hw->switch_info;
6988 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6989 if (!sw->recp_list[rid].recp_created)
6991 if (!sw->recp_list[rid].adv_rule)
6993 list_head = &sw->recp_list[rid].filt_rules;
6995 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6996 ice_adv_fltr_mgmt_list_entry, list_entry) {
6997 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
7002 rinfo = list_itr->rule_info;
7003 rinfo.sw_act.vsi_handle = vsi_handle;
7004 status = ice_rem_adv_rule(hw, list_itr->lkups,
7005 list_itr->lkups_cnt, &rinfo);
7015 * ice_replay_fltr - Replay all the filters stored by a specific list head
7016 * @hw: pointer to the hardware structure
7017 * @list_head: list for which filters needs to be replayed
7018 * @recp_id: Recipe ID for which rules need to be replayed
7020 static enum ice_status
7021 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
7023 struct ice_fltr_mgmt_list_entry *itr;
7024 enum ice_status status = ICE_SUCCESS;
7025 struct ice_sw_recipe *recp_list;
7026 u8 lport = hw->port_info->lport;
7027 struct LIST_HEAD_TYPE l_head;
7029 if (LIST_EMPTY(list_head))
7032 recp_list = &hw->switch_info->recp_list[recp_id];
7033 /* Move entries from the given list_head to a temporary l_head so that
7034 * they can be replayed. Otherwise when trying to re-add the same
7035 * filter, the function will return already exists
7037 LIST_REPLACE_INIT(list_head, &l_head);
7039 /* Mark the given list_head empty by reinitializing it so filters
7040 * could be added again by *handler
7042 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
7044 struct ice_fltr_list_entry f_entry;
7046 f_entry.fltr_info = itr->fltr_info;
7047 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
7048 status = ice_add_rule_internal(hw, recp_list, lport,
7050 if (status != ICE_SUCCESS)
7055 /* Add a filter per VSI separately */
7060 ice_find_first_bit(itr->vsi_list_info->vsi_map,
7062 if (!ice_is_vsi_valid(hw, vsi_handle))
7065 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7066 f_entry.fltr_info.vsi_handle = vsi_handle;
7067 f_entry.fltr_info.fwd_id.hw_vsi_id =
7068 ice_get_hw_vsi_num(hw, vsi_handle);
7069 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7070 if (recp_id == ICE_SW_LKUP_VLAN)
7071 status = ice_add_vlan_internal(hw, recp_list,
7074 status = ice_add_rule_internal(hw, recp_list,
7077 if (status != ICE_SUCCESS)
7082 /* Clear the filter management list */
7083 ice_rem_sw_rule_info(hw, &l_head);
7088 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
7089 * @hw: pointer to the hardware structure
7091 * NOTE: This function does not clean up partially added filters on error.
7092 * It is up to caller of the function to issue a reset or fail early.
7094 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
7096 struct ice_switch_info *sw = hw->switch_info;
7097 enum ice_status status = ICE_SUCCESS;
7100 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7101 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
7103 status = ice_replay_fltr(hw, i, head);
7104 if (status != ICE_SUCCESS)
7111 * ice_replay_vsi_fltr - Replay filters for requested VSI
7112 * @hw: pointer to the hardware structure
7113 * @vsi_handle: driver VSI handle
7114 * @recp_id: Recipe ID for which rules need to be replayed
7115 * @list_head: list for which filters need to be replayed
7117 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7118 * It is required to pass valid VSI handle.
7120 static enum ice_status
7121 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7122 struct LIST_HEAD_TYPE *list_head)
7124 struct ice_fltr_mgmt_list_entry *itr;
7125 enum ice_status status = ICE_SUCCESS;
7126 struct ice_sw_recipe *recp_list;
7129 if (LIST_EMPTY(list_head))
7131 recp_list = &hw->switch_info->recp_list[recp_id];
7132 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7134 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7136 struct ice_fltr_list_entry f_entry;
7138 f_entry.fltr_info = itr->fltr_info;
7139 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7140 itr->fltr_info.vsi_handle == vsi_handle) {
7141 /* update the src in case it is VSI num */
7142 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7143 f_entry.fltr_info.src = hw_vsi_id;
7144 status = ice_add_rule_internal(hw, recp_list,
7145 hw->port_info->lport,
7147 if (status != ICE_SUCCESS)
7151 if (!itr->vsi_list_info ||
7152 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7154 /* Clearing it so that the logic can add it back */
7155 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7156 f_entry.fltr_info.vsi_handle = vsi_handle;
7157 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7158 /* update the src in case it is VSI num */
7159 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7160 f_entry.fltr_info.src = hw_vsi_id;
7161 if (recp_id == ICE_SW_LKUP_VLAN)
7162 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7164 status = ice_add_rule_internal(hw, recp_list,
7165 hw->port_info->lport,
7167 if (status != ICE_SUCCESS)
7175 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7176 * @hw: pointer to the hardware structure
7177 * @vsi_handle: driver VSI handle
7178 * @list_head: list for which filters need to be replayed
7180 * Replay the advanced rule for the given VSI.
7182 static enum ice_status
7183 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7184 struct LIST_HEAD_TYPE *list_head)
7186 struct ice_rule_query_data added_entry = { 0 };
7187 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7188 enum ice_status status = ICE_SUCCESS;
7190 if (LIST_EMPTY(list_head))
7192 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7194 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7195 u16 lk_cnt = adv_fltr->lkups_cnt;
7197 if (vsi_handle != rinfo->sw_act.vsi_handle)
7199 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7208 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7209 * @hw: pointer to the hardware structure
7210 * @vsi_handle: driver VSI handle
7212 * Replays filters for requested VSI via vsi_handle.
7214 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7216 struct ice_switch_info *sw = hw->switch_info;
7217 enum ice_status status;
7220 /* Update the recipes that were created */
7221 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7222 struct LIST_HEAD_TYPE *head;
7224 head = &sw->recp_list[i].filt_replay_rules;
7225 if (!sw->recp_list[i].adv_rule)
7226 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7228 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7229 if (status != ICE_SUCCESS)
7237 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7238 * @hw: pointer to the HW struct
7240 * Deletes the filter replay rules.
7242 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7244 struct ice_switch_info *sw = hw->switch_info;
7250 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7251 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7252 struct LIST_HEAD_TYPE *l_head;
7254 l_head = &sw->recp_list[i].filt_replay_rules;
7255 if (!sw->recp_list[i].adv_rule)
7256 ice_rem_sw_rule_info(hw, l_head);
7258 ice_rem_adv_rule_info(hw, l_head);