1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
12 #define ICE_MAX_VLAN_ID 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID 0x002F
14 #define ICE_PPP_IPV6_PROTO_ID 0x0057
16 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
17 * struct to configure any switch filter rules.
18 * {DA (6 bytes), SA(6 bytes),
19 * Ether type (2 bytes for header without VLAN tag) OR
20 * VLAN tag (4 bytes for header with VLAN tag) }
22 * Word on Hardcoded values
23 * byte 0 = 0x2: to identify it as locally administered DA MAC
24 * byte 6 = 0x2: to identify it as locally administered SA MAC
25 * byte 12 = 0x81 & byte 13 = 0x00:
26 * In case of VLAN filter first two bytes defines ether type (0x8100)
27 * and remaining two bytes are placeholder for programming a given VLAN ID
28 * In case of Ether type filter it is treated as header without VLAN tag
29 * and byte 12 and 13 is used to program a given Ether type instead
31 #define DUMMY_ETH_HDR_LEN 16
32 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
36 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
37 (sizeof(struct ice_aqc_sw_rules_elem) - \
38 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
39 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
40 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
41 (sizeof(struct ice_aqc_sw_rules_elem) - \
42 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
43 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
44 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
45 (sizeof(struct ice_aqc_sw_rules_elem) - \
46 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
47 sizeof(struct ice_sw_rule_lg_act) - \
48 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
49 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
50 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
51 (sizeof(struct ice_aqc_sw_rules_elem) - \
52 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
53 sizeof(struct ice_sw_rule_vsi_list) - \
54 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
55 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
57 struct ice_dummy_pkt_offsets {
58 enum ice_protocol_type type;
59 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
65 { ICE_IPV4_OFOS, 14 },
70 { ICE_PROTOCOL_LAST, 0 },
73 static const u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x06, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
107 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
109 { ICE_ETYPE_OL, 12 },
110 { ICE_IPV4_OFOS, 14 },
114 { ICE_UDP_ILOS, 76 },
115 { ICE_PROTOCOL_LAST, 0 },
118 static const u8 dummy_gre_udp_packet[] = {
119 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
120 0x00, 0x00, 0x00, 0x00,
121 0x00, 0x00, 0x00, 0x00,
123 0x08, 0x00, /* ICE_ETYPE_OL 12 */
125 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x2F, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x00, 0x00, 0x00,
131 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
132 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
135 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00,
139 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x11, 0x00, 0x00,
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
146 0x00, 0x08, 0x00, 0x00,
149 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
151 { ICE_ETYPE_OL, 12 },
152 { ICE_IPV4_OFOS, 14 },
156 { ICE_VXLAN_GPE, 42 },
160 { ICE_PROTOCOL_LAST, 0 },
163 static const u8 dummy_udp_tun_tcp_packet[] = {
164 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
165 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
168 0x08, 0x00, /* ICE_ETYPE_OL 12 */
170 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
171 0x00, 0x01, 0x00, 0x00,
172 0x40, 0x11, 0x00, 0x00,
173 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
177 0x00, 0x46, 0x00, 0x00,
179 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
180 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
183 0x00, 0x00, 0x00, 0x00,
184 0x00, 0x00, 0x00, 0x00,
187 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
188 0x00, 0x01, 0x00, 0x00,
189 0x40, 0x06, 0x00, 0x00,
190 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
196 0x50, 0x02, 0x20, 0x00,
197 0x00, 0x00, 0x00, 0x00
200 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
202 { ICE_ETYPE_OL, 12 },
203 { ICE_IPV4_OFOS, 14 },
207 { ICE_VXLAN_GPE, 42 },
210 { ICE_UDP_ILOS, 84 },
211 { ICE_PROTOCOL_LAST, 0 },
214 static const u8 dummy_udp_tun_udp_packet[] = {
215 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
216 0x00, 0x00, 0x00, 0x00,
217 0x00, 0x00, 0x00, 0x00,
219 0x08, 0x00, /* ICE_ETYPE_OL 12 */
221 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
222 0x00, 0x01, 0x00, 0x00,
223 0x00, 0x11, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
228 0x00, 0x3a, 0x00, 0x00,
230 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
234 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00,
238 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
239 0x00, 0x01, 0x00, 0x00,
240 0x00, 0x11, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
245 0x00, 0x08, 0x00, 0x00,
248 /* offset info for MAC + IPv4 + UDP dummy packet */
249 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
251 { ICE_ETYPE_OL, 12 },
252 { ICE_IPV4_OFOS, 14 },
253 { ICE_UDP_ILOS, 34 },
254 { ICE_PROTOCOL_LAST, 0 },
257 /* Dummy packet for MAC + IPv4 + UDP */
258 static const u8 dummy_udp_packet[] = {
259 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
260 0x00, 0x00, 0x00, 0x00,
261 0x00, 0x00, 0x00, 0x00,
263 0x08, 0x00, /* ICE_ETYPE_OL 12 */
265 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
266 0x00, 0x01, 0x00, 0x00,
267 0x00, 0x11, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
272 0x00, 0x08, 0x00, 0x00,
274 0x00, 0x00, /* 2 bytes for 4 byte alignment */
277 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
278 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
280 { ICE_ETYPE_OL, 12 },
281 { ICE_VLAN_OFOS, 14 },
282 { ICE_IPV4_OFOS, 18 },
283 { ICE_UDP_ILOS, 38 },
284 { ICE_PROTOCOL_LAST, 0 },
287 /* C-tag (801.1Q), IPv4:UDP dummy packet */
288 static const u8 dummy_vlan_udp_packet[] = {
289 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
290 0x00, 0x00, 0x00, 0x00,
291 0x00, 0x00, 0x00, 0x00,
293 0x81, 0x00, /* ICE_ETYPE_OL 12 */
295 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
297 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
298 0x00, 0x01, 0x00, 0x00,
299 0x00, 0x11, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
304 0x00, 0x08, 0x00, 0x00,
306 0x00, 0x00, /* 2 bytes for 4 byte alignment */
309 /* offset info for MAC + IPv4 + TCP dummy packet */
310 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
312 { ICE_ETYPE_OL, 12 },
313 { ICE_IPV4_OFOS, 14 },
315 { ICE_PROTOCOL_LAST, 0 },
318 /* Dummy packet for MAC + IPv4 + TCP */
319 static const u8 dummy_tcp_packet[] = {
320 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
321 0x00, 0x00, 0x00, 0x00,
322 0x00, 0x00, 0x00, 0x00,
324 0x08, 0x00, /* ICE_ETYPE_OL 12 */
326 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
327 0x00, 0x01, 0x00, 0x00,
328 0x00, 0x06, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
333 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
335 0x50, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, /* 2 bytes for 4 byte alignment */
341 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
342 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
344 { ICE_ETYPE_OL, 12 },
345 { ICE_VLAN_OFOS, 14 },
346 { ICE_IPV4_OFOS, 18 },
348 { ICE_PROTOCOL_LAST, 0 },
351 /* C-tag (801.1Q), IPv4:TCP dummy packet */
352 static const u8 dummy_vlan_tcp_packet[] = {
353 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
354 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00,
357 0x81, 0x00, /* ICE_ETYPE_OL 12 */
359 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
361 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
362 0x00, 0x01, 0x00, 0x00,
363 0x00, 0x06, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
368 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
370 0x50, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, /* 2 bytes for 4 byte alignment */
376 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
378 { ICE_ETYPE_OL, 12 },
379 { ICE_IPV6_OFOS, 14 },
381 { ICE_PROTOCOL_LAST, 0 },
384 static const u8 dummy_tcp_ipv6_packet[] = {
385 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
386 0x00, 0x00, 0x00, 0x00,
387 0x00, 0x00, 0x00, 0x00,
389 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
391 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
392 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
399 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
405 0x50, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, /* 2 bytes for 4 byte alignment */
411 /* C-tag (802.1Q): IPv6 + TCP */
412 static const struct ice_dummy_pkt_offsets
413 dummy_vlan_tcp_ipv6_packet_offsets[] = {
415 { ICE_ETYPE_OL, 12 },
416 { ICE_VLAN_OFOS, 14 },
417 { ICE_IPV6_OFOS, 18 },
419 { ICE_PROTOCOL_LAST, 0 },
422 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
423 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
424 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
425 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00,
428 0x81, 0x00, /* ICE_ETYPE_OL 12 */
430 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
432 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
433 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
446 0x50, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
449 0x00, 0x00, /* 2 bytes for 4 byte alignment */
453 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
455 { ICE_ETYPE_OL, 12 },
456 { ICE_IPV6_OFOS, 14 },
457 { ICE_UDP_ILOS, 54 },
458 { ICE_PROTOCOL_LAST, 0 },
461 /* IPv6 + UDP dummy packet */
462 static const u8 dummy_udp_ipv6_packet[] = {
463 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
464 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, 0x00, 0x00,
467 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
469 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
470 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
477 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
481 0x00, 0x08, 0x00, 0x00,
483 0x00, 0x00, /* 2 bytes for 4 byte alignment */
486 /* C-tag (802.1Q): IPv6 + UDP */
487 static const struct ice_dummy_pkt_offsets
488 dummy_vlan_udp_ipv6_packet_offsets[] = {
490 { ICE_ETYPE_OL, 12 },
491 { ICE_VLAN_OFOS, 14 },
492 { ICE_IPV6_OFOS, 18 },
493 { ICE_UDP_ILOS, 58 },
494 { ICE_PROTOCOL_LAST, 0 },
497 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
498 static const u8 dummy_vlan_udp_ipv6_packet[] = {
499 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
500 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00,
503 0x81, 0x00, /* ICE_ETYPE_OL 12 */
505 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
507 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
508 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
519 0x00, 0x08, 0x00, 0x00,
521 0x00, 0x00, /* 2 bytes for 4 byte alignment */
524 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
526 { ICE_IPV4_OFOS, 14 },
529 { ICE_PROTOCOL_LAST, 0 },
532 static const u8 dummy_udp_gtp_packet[] = {
533 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
534 0x00, 0x00, 0x00, 0x00,
535 0x00, 0x00, 0x00, 0x00,
538 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x11, 0x00, 0x00,
541 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
545 0x00, 0x1c, 0x00, 0x00,
547 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
548 0x00, 0x00, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x85,
551 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
552 0x00, 0x00, 0x00, 0x00,
555 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
557 { ICE_ETYPE_OL, 12 },
558 { ICE_VLAN_OFOS, 14},
560 { ICE_PROTOCOL_LAST, 0 },
563 static const u8 dummy_pppoe_ipv4_packet[] = {
564 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
565 0x00, 0x00, 0x00, 0x00,
566 0x00, 0x00, 0x00, 0x00,
568 0x81, 0x00, /* ICE_ETYPE_OL 12 */
570 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
572 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
575 0x00, 0x21, /* PPP Link Layer 24 */
577 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
586 static const u8 dummy_pppoe_ipv6_packet[] = {
587 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
588 0x00, 0x00, 0x00, 0x00,
589 0x00, 0x00, 0x00, 0x00,
591 0x81, 0x00, /* ICE_ETYPE_OL 12 */
593 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
595 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
598 0x00, 0x57, /* PPP Link Layer 24 */
600 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
601 0x00, 0x00, 0x00, 0x00,
602 0x00, 0x00, 0x00, 0x00,
603 0x00, 0x00, 0x00, 0x00,
604 0x00, 0x00, 0x00, 0x00,
605 0x00, 0x00, 0x00, 0x00,
606 0x00, 0x00, 0x00, 0x00,
607 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00,
609 0x00, 0x00, 0x00, 0x00,
611 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
614 /* this is a recipe to profile association bitmap */
615 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
616 ICE_MAX_NUM_PROFILES);
618 /* this is a profile to recipe association bitmap */
619 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
620 ICE_MAX_NUM_RECIPES);
622 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
625 * ice_collect_result_idx - copy result index values
626 * @buf: buffer that contains the result index
627 * @recp: the recipe struct to copy data into
629 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
630 struct ice_sw_recipe *recp)
632 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
633 ice_set_bit(buf->content.result_indx &
634 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
638 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
639 * @hw: pointer to hardware structure
640 * @recps: struct that we need to populate
641 * @rid: recipe ID that we are populating
642 * @refresh_required: true if we should get recipe to profile mapping from FW
644 * This function is used to populate all the necessary entries into our
645 * bookkeeping so that we have a current list of all the recipes that are
646 * programmed in the firmware.
648 static enum ice_status
649 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
650 bool *refresh_required)
652 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
653 struct ice_aqc_recipe_data_elem *tmp;
654 u16 num_recps = ICE_MAX_NUM_RECIPES;
655 struct ice_prot_lkup_ext *lkup_exts;
656 enum ice_status status;
660 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
662 /* we need a buffer big enough to accommodate all the recipes */
663 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
664 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
666 return ICE_ERR_NO_MEMORY;
668 tmp[0].recipe_indx = rid;
669 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
670 /* non-zero status meaning recipe doesn't exist */
674 /* Get recipe to profile map so that we can get the fv from lkups that
675 * we read for a recipe from FW. Since we want to minimize the number of
676 * times we make this FW call, just make one call and cache the copy
677 * until a new recipe is added. This operation is only required the
678 * first time to get the changes from FW. Then to search existing
679 * entries we don't need to update the cache again until another recipe
682 if (*refresh_required) {
683 ice_get_recp_to_prof_map(hw);
684 *refresh_required = false;
687 /* Start populating all the entries for recps[rid] based on lkups from
688 * firmware. Note that we are only creating the root recipe in our
691 lkup_exts = &recps[rid].lkup_exts;
693 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
694 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
695 struct ice_recp_grp_entry *rg_entry;
696 u8 i, prof, idx, prot = 0;
700 rg_entry = (struct ice_recp_grp_entry *)
701 ice_malloc(hw, sizeof(*rg_entry));
703 status = ICE_ERR_NO_MEMORY;
707 idx = root_bufs.recipe_indx;
708 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
710 /* Mark all result indices in this chain */
711 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
712 ice_set_bit(root_bufs.content.result_indx &
713 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
715 /* get the first profile that is associated with rid */
716 prof = ice_find_first_bit(recipe_to_profile[idx],
717 ICE_MAX_NUM_PROFILES);
718 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
719 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
721 rg_entry->fv_idx[i] = lkup_indx;
722 rg_entry->fv_mask[i] =
723 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
725 /* If the recipe is a chained recipe then all its
726 * child recipe's result will have a result index.
727 * To fill fv_words we should not use those result
728 * index, we only need the protocol ids and offsets.
729 * We will skip all the fv_idx which stores result
730 * index in them. We also need to skip any fv_idx which
731 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
732 * valid offset value.
734 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
735 rg_entry->fv_idx[i]) ||
736 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
737 rg_entry->fv_idx[i] == 0)
740 ice_find_prot_off(hw, ICE_BLK_SW, prof,
741 rg_entry->fv_idx[i], &prot, &off);
742 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
743 lkup_exts->fv_words[fv_word_idx].off = off;
746 /* populate rg_list with the data from the child entry of this
749 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
751 /* Propagate some data to the recipe database */
752 recps[idx].is_root = !!is_root;
753 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
754 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
755 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
756 recps[idx].chain_idx = root_bufs.content.result_indx &
757 ~ICE_AQ_RECIPE_RESULT_EN;
758 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
760 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
766 /* Only do the following for root recipes entries */
767 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
768 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
769 recps[idx].root_rid = root_bufs.content.rid &
770 ~ICE_AQ_RECIPE_ID_IS_ROOT;
771 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
774 /* Complete initialization of the root recipe entry */
775 lkup_exts->n_val_words = fv_word_idx;
776 recps[rid].big_recp = (num_recps > 1);
777 recps[rid].n_grp_count = (u8)num_recps;
778 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
779 ice_memdup(hw, tmp, recps[rid].n_grp_count *
780 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
781 if (!recps[rid].root_buf)
784 /* Copy result indexes */
785 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
786 recps[rid].recp_created = true;
794 * ice_get_recp_to_prof_map - updates recipe to profile mapping
795 * @hw: pointer to hardware structure
797 * This function is used to populate recipe_to_profile matrix where index to
798 * this array is the recipe ID and the element is the mapping of which profiles
799 * is this recipe mapped to.
802 ice_get_recp_to_prof_map(struct ice_hw *hw)
804 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
807 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
810 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
811 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
812 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
814 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
815 ICE_MAX_NUM_RECIPES);
816 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
817 if (ice_is_bit_set(r_bitmap, j))
818 ice_set_bit(i, recipe_to_profile[j]);
823 * ice_init_def_sw_recp - initialize the recipe book keeping tables
824 * @hw: pointer to the HW struct
825 * @recp_list: pointer to sw recipe list
827 * Allocate memory for the entire recipe table and initialize the structures/
828 * entries corresponding to basic recipes.
831 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
833 struct ice_sw_recipe *recps;
836 recps = (struct ice_sw_recipe *)
837 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
839 return ICE_ERR_NO_MEMORY;
841 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
842 recps[i].root_rid = i;
843 INIT_LIST_HEAD(&recps[i].filt_rules);
844 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
845 INIT_LIST_HEAD(&recps[i].rg_list);
846 ice_init_lock(&recps[i].filt_rule_lock);
855 * ice_aq_get_sw_cfg - get switch configuration
856 * @hw: pointer to the hardware structure
857 * @buf: pointer to the result buffer
858 * @buf_size: length of the buffer available for response
859 * @req_desc: pointer to requested descriptor
860 * @num_elems: pointer to number of elements
861 * @cd: pointer to command details structure or NULL
863 * Get switch configuration (0x0200) to be placed in 'buff'.
864 * This admin command returns information such as initial VSI/port number
865 * and switch ID it belongs to.
867 * NOTE: *req_desc is both an input/output parameter.
868 * The caller of this function first calls this function with *request_desc set
869 * to 0. If the response from f/w has *req_desc set to 0, all the switch
870 * configuration information has been returned; if non-zero (meaning not all
871 * the information was returned), the caller should call this function again
872 * with *req_desc set to the previous value returned by f/w to get the
873 * next block of switch configuration information.
875 * *num_elems is output only parameter. This reflects the number of elements
876 * in response buffer. The caller of this function to use *num_elems while
877 * parsing the response buffer.
879 static enum ice_status
880 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
881 u16 buf_size, u16 *req_desc, u16 *num_elems,
882 struct ice_sq_cd *cd)
884 struct ice_aqc_get_sw_cfg *cmd;
885 enum ice_status status;
886 struct ice_aq_desc desc;
888 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
889 cmd = &desc.params.get_sw_conf;
890 cmd->element = CPU_TO_LE16(*req_desc);
892 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
894 *req_desc = LE16_TO_CPU(cmd->element);
895 *num_elems = LE16_TO_CPU(cmd->num_elems);
902 * ice_alloc_sw - allocate resources specific to switch
903 * @hw: pointer to the HW struct
904 * @ena_stats: true to turn on VEB stats
905 * @shared_res: true for shared resource, false for dedicated resource
906 * @sw_id: switch ID returned
907 * @counter_id: VEB counter ID returned
909 * allocates switch resources (SWID and VEB counter) (0x0208)
912 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
915 struct ice_aqc_alloc_free_res_elem *sw_buf;
916 struct ice_aqc_res_elem *sw_ele;
917 enum ice_status status;
920 buf_len = sizeof(*sw_buf);
921 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
922 ice_malloc(hw, buf_len);
924 return ICE_ERR_NO_MEMORY;
926 /* Prepare buffer for switch ID.
927 * The number of resource entries in buffer is passed as 1 since only a
928 * single switch/VEB instance is allocated, and hence a single sw_id
931 sw_buf->num_elems = CPU_TO_LE16(1);
933 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
934 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
935 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
937 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
938 ice_aqc_opc_alloc_res, NULL);
941 goto ice_alloc_sw_exit;
943 sw_ele = &sw_buf->elem[0];
944 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
947 /* Prepare buffer for VEB Counter */
948 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
949 struct ice_aqc_alloc_free_res_elem *counter_buf;
950 struct ice_aqc_res_elem *counter_ele;
952 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
953 ice_malloc(hw, buf_len);
955 status = ICE_ERR_NO_MEMORY;
956 goto ice_alloc_sw_exit;
959 /* The number of resource entries in buffer is passed as 1 since
960 * only a single switch/VEB instance is allocated, and hence a
961 * single VEB counter is requested.
963 counter_buf->num_elems = CPU_TO_LE16(1);
964 counter_buf->res_type =
965 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
966 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
967 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
971 ice_free(hw, counter_buf);
972 goto ice_alloc_sw_exit;
974 counter_ele = &counter_buf->elem[0];
975 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
976 ice_free(hw, counter_buf);
980 ice_free(hw, sw_buf);
985 * ice_free_sw - free resources specific to switch
986 * @hw: pointer to the HW struct
987 * @sw_id: switch ID returned
988 * @counter_id: VEB counter ID returned
990 * free switch resources (SWID and VEB counter) (0x0209)
992 * NOTE: This function frees multiple resources. It continues
993 * releasing other resources even after it encounters error.
994 * The error code returned is the last error it encountered.
996 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
998 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
999 enum ice_status status, ret_status;
1002 buf_len = sizeof(*sw_buf);
1003 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1004 ice_malloc(hw, buf_len);
1006 return ICE_ERR_NO_MEMORY;
1008 /* Prepare buffer to free for switch ID res.
1009 * The number of resource entries in buffer is passed as 1 since only a
1010 * single switch/VEB instance is freed, and hence a single sw_id
1013 sw_buf->num_elems = CPU_TO_LE16(1);
1014 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1015 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1017 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1018 ice_aqc_opc_free_res, NULL);
1021 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1023 /* Prepare buffer to free for VEB Counter resource */
1024 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1025 ice_malloc(hw, buf_len);
1027 ice_free(hw, sw_buf);
1028 return ICE_ERR_NO_MEMORY;
1031 /* The number of resource entries in buffer is passed as 1 since only a
1032 * single switch/VEB instance is freed, and hence a single VEB counter
1035 counter_buf->num_elems = CPU_TO_LE16(1);
1036 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1037 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1039 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1040 ice_aqc_opc_free_res, NULL);
1042 ice_debug(hw, ICE_DBG_SW,
1043 "VEB counter resource could not be freed\n");
1044 ret_status = status;
1047 ice_free(hw, counter_buf);
1048 ice_free(hw, sw_buf);
1054 * @hw: pointer to the HW struct
1055 * @vsi_ctx: pointer to a VSI context struct
1056 * @cd: pointer to command details structure or NULL
1058 * Add a VSI context to the hardware (0x0210)
1061 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1062 struct ice_sq_cd *cd)
1064 struct ice_aqc_add_update_free_vsi_resp *res;
1065 struct ice_aqc_add_get_update_free_vsi *cmd;
1066 struct ice_aq_desc desc;
1067 enum ice_status status;
1069 cmd = &desc.params.vsi_cmd;
1070 res = &desc.params.add_update_free_vsi_res;
1072 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1074 if (!vsi_ctx->alloc_from_pool)
1075 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1076 ICE_AQ_VSI_IS_VALID);
1078 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1080 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1082 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1083 sizeof(vsi_ctx->info), cd);
1086 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1087 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1088 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1096 * @hw: pointer to the HW struct
1097 * @vsi_ctx: pointer to a VSI context struct
1098 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1099 * @cd: pointer to command details structure or NULL
1101 * Free VSI context info from hardware (0x0213)
1104 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1105 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1107 struct ice_aqc_add_update_free_vsi_resp *resp;
1108 struct ice_aqc_add_get_update_free_vsi *cmd;
1109 struct ice_aq_desc desc;
1110 enum ice_status status;
1112 cmd = &desc.params.vsi_cmd;
1113 resp = &desc.params.add_update_free_vsi_res;
1115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1117 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1119 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1121 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1123 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1124 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1132 * @hw: pointer to the HW struct
1133 * @vsi_ctx: pointer to a VSI context struct
1134 * @cd: pointer to command details structure or NULL
1136 * Update VSI context in the hardware (0x0211)
1139 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1140 struct ice_sq_cd *cd)
1142 struct ice_aqc_add_update_free_vsi_resp *resp;
1143 struct ice_aqc_add_get_update_free_vsi *cmd;
1144 struct ice_aq_desc desc;
1145 enum ice_status status;
1147 cmd = &desc.params.vsi_cmd;
1148 resp = &desc.params.add_update_free_vsi_res;
1150 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1152 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1154 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1156 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1157 sizeof(vsi_ctx->info), cd);
1160 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1161 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1168 * ice_is_vsi_valid - check whether the VSI is valid or not
1169 * @hw: pointer to the HW struct
1170 * @vsi_handle: VSI handle
1172 * check whether the VSI is valid or not
1174 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1176 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1180 * ice_get_hw_vsi_num - return the HW VSI number
1181 * @hw: pointer to the HW struct
1182 * @vsi_handle: VSI handle
1184 * return the HW VSI number
1185 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1187 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1189 return hw->vsi_ctx[vsi_handle]->vsi_num;
1193 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1194 * @hw: pointer to the HW struct
1195 * @vsi_handle: VSI handle
1197 * return the VSI context entry for a given VSI handle
1199 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1201 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1205 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1206 * @hw: pointer to the HW struct
1207 * @vsi_handle: VSI handle
1208 * @vsi: VSI context pointer
1210 * save the VSI context entry for a given VSI handle
1213 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1215 hw->vsi_ctx[vsi_handle] = vsi;
1219 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1220 * @hw: pointer to the HW struct
1221 * @vsi_handle: VSI handle
1223 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1225 struct ice_vsi_ctx *vsi;
1228 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1231 ice_for_each_traffic_class(i) {
1232 if (vsi->lan_q_ctx[i]) {
1233 ice_free(hw, vsi->lan_q_ctx[i]);
1234 vsi->lan_q_ctx[i] = NULL;
1240 * ice_clear_vsi_ctx - clear the VSI context entry
1241 * @hw: pointer to the HW struct
1242 * @vsi_handle: VSI handle
1244 * clear the VSI context entry
1246 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1248 struct ice_vsi_ctx *vsi;
1250 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1252 ice_clear_vsi_q_ctx(hw, vsi_handle);
1254 hw->vsi_ctx[vsi_handle] = NULL;
1259 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1260 * @hw: pointer to the HW struct
1262 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1266 for (i = 0; i < ICE_MAX_VSI; i++)
1267 ice_clear_vsi_ctx(hw, i);
1271 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1272 * @hw: pointer to the HW struct
1273 * @vsi_handle: unique VSI handle provided by drivers
1274 * @vsi_ctx: pointer to a VSI context struct
1275 * @cd: pointer to command details structure or NULL
1277 * Add a VSI context to the hardware also add it into the VSI handle list.
1278 * If this function gets called after reset for existing VSIs then update
1279 * with the new HW VSI number in the corresponding VSI handle list entry.
1282 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1283 struct ice_sq_cd *cd)
1285 struct ice_vsi_ctx *tmp_vsi_ctx;
1286 enum ice_status status;
1288 if (vsi_handle >= ICE_MAX_VSI)
1289 return ICE_ERR_PARAM;
1290 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1293 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1295 /* Create a new VSI context */
1296 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1297 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1299 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1300 return ICE_ERR_NO_MEMORY;
1302 *tmp_vsi_ctx = *vsi_ctx;
1304 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1306 /* update with new HW VSI num */
1307 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1314 * ice_free_vsi- free VSI context from hardware and VSI handle list
1315 * @hw: pointer to the HW struct
1316 * @vsi_handle: unique VSI handle
1317 * @vsi_ctx: pointer to a VSI context struct
1318 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1319 * @cd: pointer to command details structure or NULL
1321 * Free VSI context info from hardware as well as from VSI handle list
1324 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1325 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1327 enum ice_status status;
1329 if (!ice_is_vsi_valid(hw, vsi_handle))
1330 return ICE_ERR_PARAM;
1331 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1332 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1334 ice_clear_vsi_ctx(hw, vsi_handle);
1340 * @hw: pointer to the HW struct
1341 * @vsi_handle: unique VSI handle
1342 * @vsi_ctx: pointer to a VSI context struct
1343 * @cd: pointer to command details structure or NULL
1345 * Update VSI context in the hardware
1348 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1349 struct ice_sq_cd *cd)
1351 if (!ice_is_vsi_valid(hw, vsi_handle))
1352 return ICE_ERR_PARAM;
1353 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1354 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1358 * ice_aq_get_vsi_params
1359 * @hw: pointer to the HW struct
1360 * @vsi_ctx: pointer to a VSI context struct
1361 * @cd: pointer to command details structure or NULL
1363 * Get VSI context info from hardware (0x0212)
1366 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1367 struct ice_sq_cd *cd)
1369 struct ice_aqc_add_get_update_free_vsi *cmd;
1370 struct ice_aqc_get_vsi_resp *resp;
1371 struct ice_aq_desc desc;
1372 enum ice_status status;
1374 cmd = &desc.params.vsi_cmd;
1375 resp = &desc.params.get_vsi_resp;
1377 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1379 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1381 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1382 sizeof(vsi_ctx->info), cd);
1384 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1386 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1387 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1394 * ice_aq_add_update_mir_rule - add/update a mirror rule
1395 * @hw: pointer to the HW struct
1396 * @rule_type: Rule Type
1397 * @dest_vsi: VSI number to which packets will be mirrored
1398 * @count: length of the list
1399 * @mr_buf: buffer for list of mirrored VSI numbers
1400 * @cd: pointer to command details structure or NULL
1403 * Add/Update Mirror Rule (0x260).
1406 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1407 u16 count, struct ice_mir_rule_buf *mr_buf,
1408 struct ice_sq_cd *cd, u16 *rule_id)
1410 struct ice_aqc_add_update_mir_rule *cmd;
1411 struct ice_aq_desc desc;
1412 enum ice_status status;
1413 __le16 *mr_list = NULL;
1416 switch (rule_type) {
1417 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1418 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1419 /* Make sure count and mr_buf are set for these rule_types */
1420 if (!(count && mr_buf))
1421 return ICE_ERR_PARAM;
1423 buf_size = count * sizeof(__le16);
1424 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1426 return ICE_ERR_NO_MEMORY;
1428 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1429 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1430 /* Make sure count and mr_buf are not set for these
1433 if (count || mr_buf)
1434 return ICE_ERR_PARAM;
1437 ice_debug(hw, ICE_DBG_SW,
1438 "Error due to unsupported rule_type %u\n", rule_type);
1439 return ICE_ERR_OUT_OF_RANGE;
1442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1444 /* Pre-process 'mr_buf' items for add/update of virtual port
1445 * ingress/egress mirroring (but not physical port ingress/egress
1451 for (i = 0; i < count; i++) {
1454 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1456 /* Validate specified VSI number, make sure it is less
1457 * than ICE_MAX_VSI, if not return with error.
1459 if (id >= ICE_MAX_VSI) {
1460 ice_debug(hw, ICE_DBG_SW,
1461 "Error VSI index (%u) out-of-range\n",
1463 ice_free(hw, mr_list);
1464 return ICE_ERR_OUT_OF_RANGE;
1467 /* add VSI to mirror rule */
1470 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1471 else /* remove VSI from mirror rule */
1472 mr_list[i] = CPU_TO_LE16(id);
1476 cmd = &desc.params.add_update_rule;
1477 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1478 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1479 ICE_AQC_RULE_ID_VALID_M);
1480 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1481 cmd->num_entries = CPU_TO_LE16(count);
1482 cmd->dest = CPU_TO_LE16(dest_vsi);
1484 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1486 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1488 ice_free(hw, mr_list);
1494 * ice_aq_delete_mir_rule - delete a mirror rule
1495 * @hw: pointer to the HW struct
1496 * @rule_id: Mirror rule ID (to be deleted)
1497 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1498 * otherwise it is returned to the shared pool
1499 * @cd: pointer to command details structure or NULL
1501 * Delete Mirror Rule (0x261).
1504 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1505 struct ice_sq_cd *cd)
1507 struct ice_aqc_delete_mir_rule *cmd;
1508 struct ice_aq_desc desc;
1510 /* rule_id should be in the range 0...63 */
1511 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1512 return ICE_ERR_OUT_OF_RANGE;
1514 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1516 cmd = &desc.params.del_rule;
1517 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1518 cmd->rule_id = CPU_TO_LE16(rule_id);
1521 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1523 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1527 * ice_aq_alloc_free_vsi_list
1528 * @hw: pointer to the HW struct
1529 * @vsi_list_id: VSI list ID returned or used for lookup
1530 * @lkup_type: switch rule filter lookup type
1531 * @opc: switch rules population command type - pass in the command opcode
1533 * allocates or free a VSI list resource
1535 static enum ice_status
1536 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1537 enum ice_sw_lkup_type lkup_type,
1538 enum ice_adminq_opc opc)
1540 struct ice_aqc_alloc_free_res_elem *sw_buf;
1541 struct ice_aqc_res_elem *vsi_ele;
1542 enum ice_status status;
1545 buf_len = sizeof(*sw_buf);
1546 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1547 ice_malloc(hw, buf_len);
1549 return ICE_ERR_NO_MEMORY;
1550 sw_buf->num_elems = CPU_TO_LE16(1);
1552 if (lkup_type == ICE_SW_LKUP_MAC ||
1553 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1554 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1555 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1556 lkup_type == ICE_SW_LKUP_PROMISC ||
1557 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1558 lkup_type == ICE_SW_LKUP_LAST) {
1559 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1560 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1562 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1564 status = ICE_ERR_PARAM;
1565 goto ice_aq_alloc_free_vsi_list_exit;
1568 if (opc == ice_aqc_opc_free_res)
1569 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1571 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1573 goto ice_aq_alloc_free_vsi_list_exit;
1575 if (opc == ice_aqc_opc_alloc_res) {
1576 vsi_ele = &sw_buf->elem[0];
1577 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1580 ice_aq_alloc_free_vsi_list_exit:
1581 ice_free(hw, sw_buf);
1586 * ice_aq_set_storm_ctrl - Sets storm control configuration
1587 * @hw: pointer to the HW struct
1588 * @bcast_thresh: represents the upper threshold for broadcast storm control
1589 * @mcast_thresh: represents the upper threshold for multicast storm control
1590 * @ctl_bitmask: storm control control knobs
1592 * Sets the storm control configuration (0x0280)
1595 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1598 struct ice_aqc_storm_cfg *cmd;
1599 struct ice_aq_desc desc;
1601 cmd = &desc.params.storm_conf;
1603 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1605 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1606 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1607 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1609 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1613 * ice_aq_get_storm_ctrl - gets storm control configuration
1614 * @hw: pointer to the HW struct
1615 * @bcast_thresh: represents the upper threshold for broadcast storm control
1616 * @mcast_thresh: represents the upper threshold for multicast storm control
1617 * @ctl_bitmask: storm control control knobs
1619 * Gets the storm control configuration (0x0281)
1622 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1625 enum ice_status status;
1626 struct ice_aq_desc desc;
1628 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1630 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1632 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1635 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1638 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1641 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1648 * ice_aq_sw_rules - add/update/remove switch rules
1649 * @hw: pointer to the HW struct
1650 * @rule_list: pointer to switch rule population list
1651 * @rule_list_sz: total size of the rule list in bytes
1652 * @num_rules: number of switch rules in the rule_list
1653 * @opc: switch rules population command type - pass in the command opcode
1654 * @cd: pointer to command details structure or NULL
1656 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1658 static enum ice_status
1659 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1660 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1662 struct ice_aq_desc desc;
1664 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1666 if (opc != ice_aqc_opc_add_sw_rules &&
1667 opc != ice_aqc_opc_update_sw_rules &&
1668 opc != ice_aqc_opc_remove_sw_rules)
1669 return ICE_ERR_PARAM;
1671 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1673 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1674 desc.params.sw_rules.num_rules_fltr_entry_index =
1675 CPU_TO_LE16(num_rules);
1676 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1680 * ice_aq_add_recipe - add switch recipe
1681 * @hw: pointer to the HW struct
1682 * @s_recipe_list: pointer to switch rule population list
1683 * @num_recipes: number of switch recipes in the list
1684 * @cd: pointer to command details structure or NULL
1689 ice_aq_add_recipe(struct ice_hw *hw,
1690 struct ice_aqc_recipe_data_elem *s_recipe_list,
1691 u16 num_recipes, struct ice_sq_cd *cd)
1693 struct ice_aqc_add_get_recipe *cmd;
1694 struct ice_aq_desc desc;
1697 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1698 cmd = &desc.params.add_get_recipe;
1699 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1701 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1702 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1704 buf_size = num_recipes * sizeof(*s_recipe_list);
1706 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1710 * ice_aq_get_recipe - get switch recipe
1711 * @hw: pointer to the HW struct
1712 * @s_recipe_list: pointer to switch rule population list
1713 * @num_recipes: pointer to the number of recipes (input and output)
1714 * @recipe_root: root recipe number of recipe(s) to retrieve
1715 * @cd: pointer to command details structure or NULL
1719 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1720 * On output, *num_recipes will equal the number of entries returned in
1723 * The caller must supply enough space in s_recipe_list to hold all possible
1724 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1727 ice_aq_get_recipe(struct ice_hw *hw,
1728 struct ice_aqc_recipe_data_elem *s_recipe_list,
1729 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1731 struct ice_aqc_add_get_recipe *cmd;
1732 struct ice_aq_desc desc;
1733 enum ice_status status;
1736 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1737 return ICE_ERR_PARAM;
1739 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1740 cmd = &desc.params.add_get_recipe;
1741 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1743 cmd->return_index = CPU_TO_LE16(recipe_root);
1744 cmd->num_sub_recipes = 0;
1746 buf_size = *num_recipes * sizeof(*s_recipe_list);
1748 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1749 /* cppcheck-suppress constArgument */
1750 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1756 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1757 * @hw: pointer to the HW struct
1758 * @profile_id: package profile ID to associate the recipe with
1759 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1760 * @cd: pointer to command details structure or NULL
1761 * Recipe to profile association (0x0291)
1764 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1765 struct ice_sq_cd *cd)
1767 struct ice_aqc_recipe_to_profile *cmd;
1768 struct ice_aq_desc desc;
1770 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1771 cmd = &desc.params.recipe_to_profile;
1772 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1773 cmd->profile_id = CPU_TO_LE16(profile_id);
1774 /* Set the recipe ID bit in the bitmask to let the device know which
1775 * profile we are associating the recipe to
1777 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1778 ICE_NONDMA_TO_NONDMA);
1780 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1784 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1785 * @hw: pointer to the HW struct
1786 * @profile_id: package profile ID to associate the recipe with
1787 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1788 * @cd: pointer to command details structure or NULL
1789 * Associate profile ID with given recipe (0x0293)
1792 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1793 struct ice_sq_cd *cd)
1795 struct ice_aqc_recipe_to_profile *cmd;
1796 struct ice_aq_desc desc;
1797 enum ice_status status;
1799 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1800 cmd = &desc.params.recipe_to_profile;
1801 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1802 cmd->profile_id = CPU_TO_LE16(profile_id);
1804 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1806 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1807 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1813 * ice_alloc_recipe - add recipe resource
1814 * @hw: pointer to the hardware structure
1815 * @rid: recipe ID returned as response to AQ call
1817 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1819 struct ice_aqc_alloc_free_res_elem *sw_buf;
1820 enum ice_status status;
1823 buf_len = sizeof(*sw_buf);
1824 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1826 return ICE_ERR_NO_MEMORY;
1828 sw_buf->num_elems = CPU_TO_LE16(1);
1829 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1830 ICE_AQC_RES_TYPE_S) |
1831 ICE_AQC_RES_TYPE_FLAG_SHARED);
1832 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1833 ice_aqc_opc_alloc_res, NULL);
1835 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1836 ice_free(hw, sw_buf);
1841 /* ice_init_port_info - Initialize port_info with switch configuration data
1842 * @pi: pointer to port_info
1843 * @vsi_port_num: VSI number or port number
1844 * @type: Type of switch element (port or VSI)
1845 * @swid: switch ID of the switch the element is attached to
1846 * @pf_vf_num: PF or VF number
1847 * @is_vf: true if the element is a VF, false otherwise
1850 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1851 u16 swid, u16 pf_vf_num, bool is_vf)
1854 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1855 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1857 pi->pf_vf_num = pf_vf_num;
1859 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1860 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1863 ice_debug(pi->hw, ICE_DBG_SW,
1864 "incorrect VSI/port type received\n");
1869 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1870 * @hw: pointer to the hardware structure
1872 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1874 struct ice_aqc_get_sw_cfg_resp *rbuf;
1875 enum ice_status status;
1882 num_total_ports = 1;
1884 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1885 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1888 return ICE_ERR_NO_MEMORY;
1890 /* Multiple calls to ice_aq_get_sw_cfg may be required
1891 * to get all the switch configuration information. The need
1892 * for additional calls is indicated by ice_aq_get_sw_cfg
1893 * writing a non-zero value in req_desc
1896 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1897 &req_desc, &num_elems, NULL);
1902 for (i = 0; i < num_elems; i++) {
1903 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1904 u16 pf_vf_num, swid, vsi_port_num;
1908 ele = rbuf[i].elements;
1909 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1910 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1912 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1913 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1915 swid = LE16_TO_CPU(ele->swid);
1917 if (LE16_TO_CPU(ele->pf_vf_num) &
1918 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1921 res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
1922 ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
1925 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1926 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1927 if (j == num_total_ports) {
1928 ice_debug(hw, ICE_DBG_SW,
1929 "more ports than expected\n");
1930 status = ICE_ERR_CFG;
1933 ice_init_port_info(hw->port_info,
1934 vsi_port_num, res_type, swid,
1942 } while (req_desc && !status);
1945 ice_free(hw, (void *)rbuf);
1950 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1951 * @hw: pointer to the hardware structure
1952 * @fi: filter info structure to fill/update
1954 * This helper function populates the lb_en and lan_en elements of the provided
1955 * ice_fltr_info struct using the switch's type and characteristics of the
1956 * switch rule being configured.
1958 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1962 if ((fi->flag & ICE_FLTR_TX) &&
1963 (fi->fltr_act == ICE_FWD_TO_VSI ||
1964 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1965 fi->fltr_act == ICE_FWD_TO_Q ||
1966 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1967 /* Setting LB for prune actions will result in replicated
1968 * packets to the internal switch that will be dropped.
1970 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1973 /* Set lan_en to TRUE if
1974 * 1. The switch is a VEB AND
1976 * 2.1 The lookup is a directional lookup like ethertype,
1977 * promiscuous, ethertype-MAC, promiscuous-VLAN
1978 * and default-port OR
1979 * 2.2 The lookup is VLAN, OR
1980 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1981 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1985 * The switch is a VEPA.
1987 * In all other cases, the LAN enable has to be set to false.
1990 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1991 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1992 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1993 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1994 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1995 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1996 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1997 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1998 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1999 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2008 * ice_fill_sw_rule - Helper function to fill switch rule structure
2009 * @hw: pointer to the hardware structure
2010 * @f_info: entry containing packet forwarding information
2011 * @s_rule: switch rule structure to be filled in based on mac_entry
2012 * @opc: switch rules population command type - pass in the command opcode
2015 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2016 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2018 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2026 if (opc == ice_aqc_opc_remove_sw_rules) {
2027 s_rule->pdata.lkup_tx_rx.act = 0;
2028 s_rule->pdata.lkup_tx_rx.index =
2029 CPU_TO_LE16(f_info->fltr_rule_id);
2030 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2034 eth_hdr_sz = sizeof(dummy_eth_header);
2035 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2037 /* initialize the ether header with a dummy header */
2038 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2039 ice_fill_sw_info(hw, f_info);
2041 switch (f_info->fltr_act) {
2042 case ICE_FWD_TO_VSI:
2043 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2044 ICE_SINGLE_ACT_VSI_ID_M;
2045 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2046 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2047 ICE_SINGLE_ACT_VALID_BIT;
2049 case ICE_FWD_TO_VSI_LIST:
2050 act |= ICE_SINGLE_ACT_VSI_LIST;
2051 act |= (f_info->fwd_id.vsi_list_id <<
2052 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2053 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2054 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2055 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2056 ICE_SINGLE_ACT_VALID_BIT;
2059 act |= ICE_SINGLE_ACT_TO_Q;
2060 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2061 ICE_SINGLE_ACT_Q_INDEX_M;
2063 case ICE_DROP_PACKET:
2064 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2065 ICE_SINGLE_ACT_VALID_BIT;
2067 case ICE_FWD_TO_QGRP:
2068 q_rgn = f_info->qgrp_size > 0 ?
2069 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2070 act |= ICE_SINGLE_ACT_TO_Q;
2071 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2072 ICE_SINGLE_ACT_Q_INDEX_M;
2073 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2074 ICE_SINGLE_ACT_Q_REGION_M;
2081 act |= ICE_SINGLE_ACT_LB_ENABLE;
2083 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2085 switch (f_info->lkup_type) {
2086 case ICE_SW_LKUP_MAC:
2087 daddr = f_info->l_data.mac.mac_addr;
2089 case ICE_SW_LKUP_VLAN:
2090 vlan_id = f_info->l_data.vlan.vlan_id;
2091 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2092 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2093 act |= ICE_SINGLE_ACT_PRUNE;
2094 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2097 case ICE_SW_LKUP_ETHERTYPE_MAC:
2098 daddr = f_info->l_data.ethertype_mac.mac_addr;
2100 case ICE_SW_LKUP_ETHERTYPE:
2101 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2102 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2104 case ICE_SW_LKUP_MAC_VLAN:
2105 daddr = f_info->l_data.mac_vlan.mac_addr;
2106 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2108 case ICE_SW_LKUP_PROMISC_VLAN:
2109 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2111 case ICE_SW_LKUP_PROMISC:
2112 daddr = f_info->l_data.mac_vlan.mac_addr;
2118 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2119 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2120 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2122 /* Recipe set depending on lookup type */
2123 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2124 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2125 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2128 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2129 ICE_NONDMA_TO_NONDMA);
2131 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2132 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2133 *off = CPU_TO_BE16(vlan_id);
2136 /* Create the switch rule with the final dummy Ethernet header */
2137 if (opc != ice_aqc_opc_update_sw_rules)
2138 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2142 * ice_add_marker_act
2143 * @hw: pointer to the hardware structure
2144 * @m_ent: the management entry for which sw marker needs to be added
2145 * @sw_marker: sw marker to tag the Rx descriptor with
2146 * @l_id: large action resource ID
2148 * Create a large action to hold software marker and update the switch rule
2149 * entry pointed by m_ent with newly created large action
2151 static enum ice_status
2152 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2153 u16 sw_marker, u16 l_id)
2155 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2156 /* For software marker we need 3 large actions
2157 * 1. FWD action: FWD TO VSI or VSI LIST
2158 * 2. GENERIC VALUE action to hold the profile ID
2159 * 3. GENERIC VALUE action to hold the software marker ID
2161 const u16 num_lg_acts = 3;
2162 enum ice_status status;
2168 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2169 return ICE_ERR_PARAM;
2171 /* Create two back-to-back switch rules and submit them to the HW using
2172 * one memory buffer:
2176 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2177 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2178 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2180 return ICE_ERR_NO_MEMORY;
2182 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2184 /* Fill in the first switch rule i.e. large action */
2185 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2186 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2187 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2189 /* First action VSI forwarding or VSI list forwarding depending on how
2192 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2193 m_ent->fltr_info.fwd_id.hw_vsi_id;
2195 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2196 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2197 ICE_LG_ACT_VSI_LIST_ID_M;
2198 if (m_ent->vsi_count > 1)
2199 act |= ICE_LG_ACT_VSI_LIST;
2200 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2202 /* Second action descriptor type */
2203 act = ICE_LG_ACT_GENERIC;
2205 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2206 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2208 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2209 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2211 /* Third action Marker value */
2212 act |= ICE_LG_ACT_GENERIC;
2213 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2214 ICE_LG_ACT_GENERIC_VALUE_M;
2216 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2218 /* call the fill switch rule to fill the lookup Tx Rx structure */
2219 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2220 ice_aqc_opc_update_sw_rules);
2222 /* Update the action to point to the large action ID */
2223 rx_tx->pdata.lkup_tx_rx.act =
2224 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2225 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2226 ICE_SINGLE_ACT_PTR_VAL_M));
2228 /* Use the filter rule ID of the previously created rule with single
2229 * act. Once the update happens, hardware will treat this as large
2232 rx_tx->pdata.lkup_tx_rx.index =
2233 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2235 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2236 ice_aqc_opc_update_sw_rules, NULL);
2238 m_ent->lg_act_idx = l_id;
2239 m_ent->sw_marker_id = sw_marker;
2242 ice_free(hw, lg_act);
2247 * ice_add_counter_act - add/update filter rule with counter action
2248 * @hw: pointer to the hardware structure
2249 * @m_ent: the management entry for which counter needs to be added
2250 * @counter_id: VLAN counter ID returned as part of allocate resource
2251 * @l_id: large action resource ID
2253 static enum ice_status
2254 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2255 u16 counter_id, u16 l_id)
2257 struct ice_aqc_sw_rules_elem *lg_act;
2258 struct ice_aqc_sw_rules_elem *rx_tx;
2259 enum ice_status status;
2260 /* 2 actions will be added while adding a large action counter */
2261 const int num_acts = 2;
2268 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2269 return ICE_ERR_PARAM;
2271 /* Create two back-to-back switch rules and submit them to the HW using
2272 * one memory buffer:
2276 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2277 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2278 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2281 return ICE_ERR_NO_MEMORY;
2283 rx_tx = (struct ice_aqc_sw_rules_elem *)
2284 ((u8 *)lg_act + lg_act_size);
2286 /* Fill in the first switch rule i.e. large action */
2287 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2288 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2289 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2291 /* First action VSI forwarding or VSI list forwarding depending on how
2294 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2295 m_ent->fltr_info.fwd_id.hw_vsi_id;
2297 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2298 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2299 ICE_LG_ACT_VSI_LIST_ID_M;
2300 if (m_ent->vsi_count > 1)
2301 act |= ICE_LG_ACT_VSI_LIST;
2302 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2304 /* Second action counter ID */
2305 act = ICE_LG_ACT_STAT_COUNT;
2306 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2307 ICE_LG_ACT_STAT_COUNT_M;
2308 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2310 /* call the fill switch rule to fill the lookup Tx Rx structure */
2311 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2312 ice_aqc_opc_update_sw_rules);
2314 act = ICE_SINGLE_ACT_PTR;
2315 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2316 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2318 /* Use the filter rule ID of the previously created rule with single
2319 * act. Once the update happens, hardware will treat this as large
2322 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2323 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2325 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2326 ice_aqc_opc_update_sw_rules, NULL);
2328 m_ent->lg_act_idx = l_id;
2329 m_ent->counter_index = counter_id;
2332 ice_free(hw, lg_act);
2337 * ice_create_vsi_list_map
2338 * @hw: pointer to the hardware structure
2339 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2340 * @num_vsi: number of VSI handles in the array
2341 * @vsi_list_id: VSI list ID generated as part of allocate resource
2343 * Helper function to create a new entry of VSI list ID to VSI mapping
2344 * using the given VSI list ID
2346 static struct ice_vsi_list_map_info *
2347 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2350 struct ice_switch_info *sw = hw->switch_info;
2351 struct ice_vsi_list_map_info *v_map;
2354 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2359 v_map->vsi_list_id = vsi_list_id;
2361 for (i = 0; i < num_vsi; i++)
2362 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2364 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2369 * ice_update_vsi_list_rule
2370 * @hw: pointer to the hardware structure
2371 * @vsi_handle_arr: array of VSI handles to form a VSI list
2372 * @num_vsi: number of VSI handles in the array
2373 * @vsi_list_id: VSI list ID generated as part of allocate resource
2374 * @remove: Boolean value to indicate if this is a remove action
2375 * @opc: switch rules population command type - pass in the command opcode
2376 * @lkup_type: lookup type of the filter
2378 * Call AQ command to add a new switch rule or update existing switch rule
2379 * using the given VSI list ID
2381 static enum ice_status
2382 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2383 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2384 enum ice_sw_lkup_type lkup_type)
2386 struct ice_aqc_sw_rules_elem *s_rule;
2387 enum ice_status status;
2393 return ICE_ERR_PARAM;
2395 if (lkup_type == ICE_SW_LKUP_MAC ||
2396 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2397 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2398 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2399 lkup_type == ICE_SW_LKUP_PROMISC ||
2400 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2401 lkup_type == ICE_SW_LKUP_LAST)
2402 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2403 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2404 else if (lkup_type == ICE_SW_LKUP_VLAN)
2405 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2406 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2408 return ICE_ERR_PARAM;
2410 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2411 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2413 return ICE_ERR_NO_MEMORY;
2414 for (i = 0; i < num_vsi; i++) {
2415 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2416 status = ICE_ERR_PARAM;
2419 /* AQ call requires hw_vsi_id(s) */
2420 s_rule->pdata.vsi_list.vsi[i] =
2421 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2424 s_rule->type = CPU_TO_LE16(rule_type);
2425 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2426 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2428 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2431 ice_free(hw, s_rule);
2436 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2437 * @hw: pointer to the HW struct
2438 * @vsi_handle_arr: array of VSI handles to form a VSI list
2439 * @num_vsi: number of VSI handles in the array
2440 * @vsi_list_id: stores the ID of the VSI list to be created
2441 * @lkup_type: switch rule filter's lookup type
2443 static enum ice_status
2444 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2445 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2447 enum ice_status status;
2449 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2450 ice_aqc_opc_alloc_res);
2454 /* Update the newly created VSI list to include the specified VSIs */
2455 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2456 *vsi_list_id, false,
2457 ice_aqc_opc_add_sw_rules, lkup_type);
2461 * ice_create_pkt_fwd_rule
2462 * @hw: pointer to the hardware structure
2463 * @recp_list: corresponding filter management list
2464 * @f_entry: entry containing packet forwarding information
2466 * Create switch rule with given filter information and add an entry
2467 * to the corresponding filter management list to track this switch rule
2470 static enum ice_status
2471 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2472 struct ice_fltr_list_entry *f_entry)
2474 struct ice_fltr_mgmt_list_entry *fm_entry;
2475 struct ice_aqc_sw_rules_elem *s_rule;
2476 enum ice_status status;
2478 s_rule = (struct ice_aqc_sw_rules_elem *)
2479 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2481 return ICE_ERR_NO_MEMORY;
2482 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2483 ice_malloc(hw, sizeof(*fm_entry));
2485 status = ICE_ERR_NO_MEMORY;
2486 goto ice_create_pkt_fwd_rule_exit;
2489 fm_entry->fltr_info = f_entry->fltr_info;
2491 /* Initialize all the fields for the management entry */
2492 fm_entry->vsi_count = 1;
2493 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2494 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2495 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2497 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2498 ice_aqc_opc_add_sw_rules);
2500 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2501 ice_aqc_opc_add_sw_rules, NULL);
2503 ice_free(hw, fm_entry);
2504 goto ice_create_pkt_fwd_rule_exit;
2507 f_entry->fltr_info.fltr_rule_id =
2508 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2509 fm_entry->fltr_info.fltr_rule_id =
2510 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2512 /* The book keeping entries will get removed when base driver
2513 * calls remove filter AQ command
2515 LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
2517 ice_create_pkt_fwd_rule_exit:
2518 ice_free(hw, s_rule);
2523 * ice_update_pkt_fwd_rule
2524 * @hw: pointer to the hardware structure
2525 * @f_info: filter information for switch rule
2527 * Call AQ command to update a previously created switch rule with a
2530 static enum ice_status
2531 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2533 struct ice_aqc_sw_rules_elem *s_rule;
2534 enum ice_status status;
2536 s_rule = (struct ice_aqc_sw_rules_elem *)
2537 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2539 return ICE_ERR_NO_MEMORY;
2541 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2543 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2545 /* Update switch rule with new rule set to forward VSI list */
2546 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2547 ice_aqc_opc_update_sw_rules, NULL);
2549 ice_free(hw, s_rule);
2554 * ice_update_sw_rule_bridge_mode
2555 * @hw: pointer to the HW struct
2557 * Updates unicast switch filter rules based on VEB/VEPA mode
2559 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2561 struct ice_switch_info *sw = hw->switch_info;
2562 struct ice_fltr_mgmt_list_entry *fm_entry;
2563 enum ice_status status = ICE_SUCCESS;
2564 struct LIST_HEAD_TYPE *rule_head;
2565 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2567 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2568 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2570 ice_acquire_lock(rule_lock);
2571 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2573 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2574 u8 *addr = fi->l_data.mac.mac_addr;
2576 /* Update unicast Tx rules to reflect the selected
2579 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2580 (fi->fltr_act == ICE_FWD_TO_VSI ||
2581 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2582 fi->fltr_act == ICE_FWD_TO_Q ||
2583 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2584 status = ice_update_pkt_fwd_rule(hw, fi);
2590 ice_release_lock(rule_lock);
2596 * ice_add_update_vsi_list
2597 * @hw: pointer to the hardware structure
2598 * @m_entry: pointer to current filter management list entry
2599 * @cur_fltr: filter information from the book keeping entry
2600 * @new_fltr: filter information with the new VSI to be added
2602 * Call AQ command to add or update previously created VSI list with new VSI.
2604 * Helper function to do book keeping associated with adding filter information
2605 * The algorithm to do the book keeping is described below :
2606 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2607 * if only one VSI has been added till now
2608 * Allocate a new VSI list and add two VSIs
2609 * to this list using switch rule command
2610 * Update the previously created switch rule with the
2611 * newly created VSI list ID
2612 * if a VSI list was previously created
2613 * Add the new VSI to the previously created VSI list set
2614 * using the update switch rule command
2616 static enum ice_status
2617 ice_add_update_vsi_list(struct ice_hw *hw,
2618 struct ice_fltr_mgmt_list_entry *m_entry,
2619 struct ice_fltr_info *cur_fltr,
2620 struct ice_fltr_info *new_fltr)
2622 enum ice_status status = ICE_SUCCESS;
2623 u16 vsi_list_id = 0;
2625 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2626 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2627 return ICE_ERR_NOT_IMPL;
2629 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2630 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2631 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2632 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2633 return ICE_ERR_NOT_IMPL;
2635 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2636 /* Only one entry existed in the mapping and it was not already
2637 * a part of a VSI list. So, create a VSI list with the old and
2640 struct ice_fltr_info tmp_fltr;
2641 u16 vsi_handle_arr[2];
2643 /* A rule already exists with the new VSI being added */
2644 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2645 return ICE_ERR_ALREADY_EXISTS;
2647 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2648 vsi_handle_arr[1] = new_fltr->vsi_handle;
2649 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2651 new_fltr->lkup_type);
2655 tmp_fltr = *new_fltr;
2656 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2657 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2658 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2659 /* Update the previous switch rule of "MAC forward to VSI" to
2660 * "MAC fwd to VSI list"
2662 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2666 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2667 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2668 m_entry->vsi_list_info =
2669 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2672 /* If this entry was large action then the large action needs
2673 * to be updated to point to FWD to VSI list
2675 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2677 ice_add_marker_act(hw, m_entry,
2678 m_entry->sw_marker_id,
2679 m_entry->lg_act_idx);
2681 u16 vsi_handle = new_fltr->vsi_handle;
2682 enum ice_adminq_opc opcode;
2684 if (!m_entry->vsi_list_info)
2687 /* A rule already exists with the new VSI being added */
2688 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2691 /* Update the previously created VSI list set with
2692 * the new VSI ID passed in
2694 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2695 opcode = ice_aqc_opc_update_sw_rules;
2697 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2698 vsi_list_id, false, opcode,
2699 new_fltr->lkup_type);
2700 /* update VSI list mapping info with new VSI ID */
2702 ice_set_bit(vsi_handle,
2703 m_entry->vsi_list_info->vsi_map);
2706 m_entry->vsi_count++;
2711 * ice_find_rule_entry - Search a rule entry
2712 * @list_head: head of rule list
2713 * @f_info: rule information
2715 * Helper function to search for a given rule entry
2716 * Returns pointer to entry storing the rule if found
2718 static struct ice_fltr_mgmt_list_entry *
2719 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
2720 struct ice_fltr_info *f_info)
2722 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2724 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2726 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2727 sizeof(f_info->l_data)) &&
2728 f_info->flag == list_itr->fltr_info.flag) {
2737 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2738 * @recp_list: VSI lists needs to be searched
2739 * @vsi_handle: VSI handle to be found in VSI list
2740 * @vsi_list_id: VSI list ID found containing vsi_handle
2742 * Helper function to search a VSI list with single entry containing given VSI
2743 * handle element. This can be extended further to search VSI list with more
2744 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2746 static struct ice_vsi_list_map_info *
2747 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
2750 struct ice_vsi_list_map_info *map_info = NULL;
2751 struct LIST_HEAD_TYPE *list_head;
2753 list_head = &recp_list->filt_rules;
2754 if (recp_list->adv_rule) {
2755 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2757 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2758 ice_adv_fltr_mgmt_list_entry,
2760 if (list_itr->vsi_list_info) {
2761 map_info = list_itr->vsi_list_info;
2762 if (ice_is_bit_set(map_info->vsi_map,
2764 *vsi_list_id = map_info->vsi_list_id;
2770 struct ice_fltr_mgmt_list_entry *list_itr;
2772 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2773 ice_fltr_mgmt_list_entry,
2775 if (list_itr->vsi_count == 1 &&
2776 list_itr->vsi_list_info) {
2777 map_info = list_itr->vsi_list_info;
2778 if (ice_is_bit_set(map_info->vsi_map,
2780 *vsi_list_id = map_info->vsi_list_id;
2790 * ice_add_rule_internal - add rule for a given lookup type
2791 * @hw: pointer to the hardware structure
2792 * @recp_list: recipe list for which rule has to be added
2793 * @lport: logic port number on which function add rule
2794 * @f_entry: structure containing MAC forwarding information
2796 * Adds or updates the rule lists for a given recipe
2798 static enum ice_status
2799 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2800 u8 lport, struct ice_fltr_list_entry *f_entry)
2802 struct ice_fltr_info *new_fltr, *cur_fltr;
2803 struct ice_fltr_mgmt_list_entry *m_entry;
2804 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2805 enum ice_status status = ICE_SUCCESS;
2807 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2808 return ICE_ERR_PARAM;
2810 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2811 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2812 f_entry->fltr_info.fwd_id.hw_vsi_id =
2813 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2815 rule_lock = &recp_list->filt_rule_lock;
2817 ice_acquire_lock(rule_lock);
2818 new_fltr = &f_entry->fltr_info;
2819 if (new_fltr->flag & ICE_FLTR_RX)
2820 new_fltr->src = lport;
2821 else if (new_fltr->flag & ICE_FLTR_TX)
2823 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2825 m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
2827 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
2828 goto exit_add_rule_internal;
2831 cur_fltr = &m_entry->fltr_info;
2832 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2834 exit_add_rule_internal:
2835 ice_release_lock(rule_lock);
2840 * ice_remove_vsi_list_rule
2841 * @hw: pointer to the hardware structure
2842 * @vsi_list_id: VSI list ID generated as part of allocate resource
2843 * @lkup_type: switch rule filter lookup type
2845 * The VSI list should be emptied before this function is called to remove the
2848 static enum ice_status
2849 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2850 enum ice_sw_lkup_type lkup_type)
2852 struct ice_aqc_sw_rules_elem *s_rule;
2853 enum ice_status status;
2856 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2857 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2859 return ICE_ERR_NO_MEMORY;
2861 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2862 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2864 /* Free the vsi_list resource that we allocated. It is assumed that the
2865 * list is empty at this point.
2867 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2868 ice_aqc_opc_free_res);
2870 ice_free(hw, s_rule);
2875 * ice_rem_update_vsi_list
2876 * @hw: pointer to the hardware structure
2877 * @vsi_handle: VSI handle of the VSI to remove
2878 * @fm_list: filter management entry for which the VSI list management needs to
2881 static enum ice_status
2882 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2883 struct ice_fltr_mgmt_list_entry *fm_list)
2885 enum ice_sw_lkup_type lkup_type;
2886 enum ice_status status = ICE_SUCCESS;
2889 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2890 fm_list->vsi_count == 0)
2891 return ICE_ERR_PARAM;
2893 /* A rule with the VSI being removed does not exist */
2894 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2895 return ICE_ERR_DOES_NOT_EXIST;
2897 lkup_type = fm_list->fltr_info.lkup_type;
2898 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
2899 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2900 ice_aqc_opc_update_sw_rules,
2905 fm_list->vsi_count--;
2906 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2908 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2909 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2910 struct ice_vsi_list_map_info *vsi_list_info =
2911 fm_list->vsi_list_info;
2914 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2916 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2917 return ICE_ERR_OUT_OF_RANGE;
2919 /* Make sure VSI list is empty before removing it below */
2920 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2922 ice_aqc_opc_update_sw_rules,
2927 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2928 tmp_fltr_info.fwd_id.hw_vsi_id =
2929 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2930 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2931 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2933 ice_debug(hw, ICE_DBG_SW,
2934 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2935 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2939 fm_list->fltr_info = tmp_fltr_info;
2942 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2943 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2944 struct ice_vsi_list_map_info *vsi_list_info =
2945 fm_list->vsi_list_info;
2947 /* Remove the VSI list since it is no longer used */
2948 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2950 ice_debug(hw, ICE_DBG_SW,
2951 "Failed to remove VSI list %d, error %d\n",
2952 vsi_list_id, status);
2956 LIST_DEL(&vsi_list_info->list_entry);
2957 ice_free(hw, vsi_list_info);
2958 fm_list->vsi_list_info = NULL;
2965 * ice_remove_rule_internal - Remove a filter rule of a given type
2967 * @hw: pointer to the hardware structure
2968 * @recp_list: recipe list for which the rule needs to removed
2969 * @f_entry: rule entry containing filter information
2971 static enum ice_status
2972 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
2973 struct ice_fltr_list_entry *f_entry)
2975 struct ice_fltr_mgmt_list_entry *list_elem;
2976 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2977 enum ice_status status = ICE_SUCCESS;
2978 bool remove_rule = false;
2981 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2982 return ICE_ERR_PARAM;
2983 f_entry->fltr_info.fwd_id.hw_vsi_id =
2984 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2986 rule_lock = &recp_list->filt_rule_lock;
2987 ice_acquire_lock(rule_lock);
2988 list_elem = ice_find_rule_entry(&recp_list->filt_rules,
2989 &f_entry->fltr_info);
2991 status = ICE_ERR_DOES_NOT_EXIST;
2995 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2997 } else if (!list_elem->vsi_list_info) {
2998 status = ICE_ERR_DOES_NOT_EXIST;
3000 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3001 /* a ref_cnt > 1 indicates that the vsi_list is being
3002 * shared by multiple rules. Decrement the ref_cnt and
3003 * remove this rule, but do not modify the list, as it
3004 * is in-use by other rules.
3006 list_elem->vsi_list_info->ref_cnt--;
3009 /* a ref_cnt of 1 indicates the vsi_list is only used
3010 * by one rule. However, the original removal request is only
3011 * for a single VSI. Update the vsi_list first, and only
3012 * remove the rule if there are no further VSIs in this list.
3014 vsi_handle = f_entry->fltr_info.vsi_handle;
3015 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3018 /* if VSI count goes to zero after updating the VSI list */
3019 if (list_elem->vsi_count == 0)
3024 /* Remove the lookup rule */
3025 struct ice_aqc_sw_rules_elem *s_rule;
3027 s_rule = (struct ice_aqc_sw_rules_elem *)
3028 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3030 status = ICE_ERR_NO_MEMORY;
3034 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3035 ice_aqc_opc_remove_sw_rules);
3037 status = ice_aq_sw_rules(hw, s_rule,
3038 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3039 ice_aqc_opc_remove_sw_rules, NULL);
3041 /* Remove a book keeping from the list */
3042 ice_free(hw, s_rule);
3047 LIST_DEL(&list_elem->list_entry);
3048 ice_free(hw, list_elem);
3051 ice_release_lock(rule_lock);
3056 * ice_aq_get_res_alloc - get allocated resources
3057 * @hw: pointer to the HW struct
3058 * @num_entries: pointer to u16 to store the number of resource entries returned
3059 * @buf: pointer to user-supplied buffer
3060 * @buf_size: size of buff
3061 * @cd: pointer to command details structure or NULL
3063 * The user-supplied buffer must be large enough to store the resource
3064 * information for all resource types. Each resource type is an
3065 * ice_aqc_get_res_resp_data_elem structure.
3068 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3069 u16 buf_size, struct ice_sq_cd *cd)
3071 struct ice_aqc_get_res_alloc *resp;
3072 enum ice_status status;
3073 struct ice_aq_desc desc;
3076 return ICE_ERR_BAD_PTR;
3078 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3079 return ICE_ERR_INVAL_SIZE;
3081 resp = &desc.params.get_res;
3083 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3084 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3086 if (!status && num_entries)
3087 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3093 * ice_aq_get_res_descs - get allocated resource descriptors
3094 * @hw: pointer to the hardware structure
3095 * @num_entries: number of resource entries in buffer
3096 * @buf: Indirect buffer to hold data parameters and response
3097 * @buf_size: size of buffer for indirect commands
3098 * @res_type: resource type
3099 * @res_shared: is resource shared
3100 * @desc_id: input - first desc ID to start; output - next desc ID
3101 * @cd: pointer to command details structure or NULL
3104 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3105 struct ice_aqc_get_allocd_res_desc_resp *buf,
3106 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3107 struct ice_sq_cd *cd)
3109 struct ice_aqc_get_allocd_res_desc *cmd;
3110 struct ice_aq_desc desc;
3111 enum ice_status status;
3113 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3115 cmd = &desc.params.get_res_desc;
3118 return ICE_ERR_PARAM;
3120 if (buf_size != (num_entries * sizeof(*buf)))
3121 return ICE_ERR_PARAM;
3123 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3125 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3126 ICE_AQC_RES_TYPE_M) | (res_shared ?
3127 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3128 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3130 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3132 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3138 * ice_add_mac_rule - Add a MAC address based filter rule
3139 * @hw: pointer to the hardware structure
3140 * @m_list: list of MAC addresses and forwarding information
3141 * @sw: pointer to switch info struct for which function add rule
3142 * @lport: logic port number on which function add rule
3144 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3145 * multiple unicast addresses, the function assumes that all the
3146 * addresses are unique in a given add_mac call. It doesn't
3147 * check for duplicates in this case, removing duplicates from a given
3148 * list should be taken care of in the caller of this function.
3150 static enum ice_status
3151 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3152 struct ice_switch_info *sw, u8 lport)
3154 struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3155 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3156 struct ice_fltr_list_entry *m_list_itr;
3157 struct LIST_HEAD_TYPE *rule_head;
3158 u16 total_elem_left, s_rule_size;
3159 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3160 enum ice_status status = ICE_SUCCESS;
3161 u16 num_unicast = 0;
3165 rule_lock = &recp_list->filt_rule_lock;
3166 rule_head = &recp_list->filt_rules;
3168 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3170 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3174 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3175 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3176 if (!ice_is_vsi_valid(hw, vsi_handle))
3177 return ICE_ERR_PARAM;
3178 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3179 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3180 /* update the src in case it is VSI num */
3181 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3182 return ICE_ERR_PARAM;
3183 m_list_itr->fltr_info.src = hw_vsi_id;
3184 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3185 IS_ZERO_ETHER_ADDR(add))
3186 return ICE_ERR_PARAM;
3187 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3188 /* Don't overwrite the unicast address */
3189 ice_acquire_lock(rule_lock);
3190 if (ice_find_rule_entry(rule_head,
3191 &m_list_itr->fltr_info)) {
3192 ice_release_lock(rule_lock);
3193 return ICE_ERR_ALREADY_EXISTS;
3195 ice_release_lock(rule_lock);
3197 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3198 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3199 m_list_itr->status =
3200 ice_add_rule_internal(hw, recp_list, lport,
3202 if (m_list_itr->status)
3203 return m_list_itr->status;
3207 ice_acquire_lock(rule_lock);
3208 /* Exit if no suitable entries were found for adding bulk switch rule */
3210 status = ICE_SUCCESS;
3211 goto ice_add_mac_exit;
3214 /* Allocate switch rule buffer for the bulk update for unicast */
3215 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3216 s_rule = (struct ice_aqc_sw_rules_elem *)
3217 ice_calloc(hw, num_unicast, s_rule_size);
3219 status = ICE_ERR_NO_MEMORY;
3220 goto ice_add_mac_exit;
3224 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3226 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3227 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3229 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3230 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3231 ice_aqc_opc_add_sw_rules);
3232 r_iter = (struct ice_aqc_sw_rules_elem *)
3233 ((u8 *)r_iter + s_rule_size);
3237 /* Call AQ bulk switch rule update for all unicast addresses */
3239 /* Call AQ switch rule in AQ_MAX chunk */
3240 for (total_elem_left = num_unicast; total_elem_left > 0;
3241 total_elem_left -= elem_sent) {
3242 struct ice_aqc_sw_rules_elem *entry = r_iter;
3244 elem_sent = MIN_T(u8, total_elem_left,
3245 (ICE_AQ_MAX_BUF_LEN / s_rule_size));
3246 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3247 elem_sent, ice_aqc_opc_add_sw_rules,
3250 goto ice_add_mac_exit;
3251 r_iter = (struct ice_aqc_sw_rules_elem *)
3252 ((u8 *)r_iter + (elem_sent * s_rule_size));
3255 /* Fill up rule ID based on the value returned from FW */
3257 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3259 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3260 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3261 struct ice_fltr_mgmt_list_entry *fm_entry;
3263 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3264 f_info->fltr_rule_id =
3265 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3266 f_info->fltr_act = ICE_FWD_TO_VSI;
3267 /* Create an entry to track this MAC address */
3268 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3269 ice_malloc(hw, sizeof(*fm_entry));
3271 status = ICE_ERR_NO_MEMORY;
3272 goto ice_add_mac_exit;
3274 fm_entry->fltr_info = *f_info;
3275 fm_entry->vsi_count = 1;
3276 /* The book keeping entries will get removed when
3277 * base driver calls remove filter AQ command
3280 LIST_ADD(&fm_entry->list_entry, rule_head);
3281 r_iter = (struct ice_aqc_sw_rules_elem *)
3282 ((u8 *)r_iter + s_rule_size);
3287 ice_release_lock(rule_lock);
3289 ice_free(hw, s_rule);
3294 * ice_add_mac - Add a MAC address based filter rule
3295 * @hw: pointer to the hardware structure
3296 * @m_list: list of MAC addresses and forwarding information
3298 * Function add MAC rule for logical port from HW struct
3301 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3304 return ICE_ERR_PARAM;
3306 return ice_add_mac_rule(hw, m_list, hw->switch_info,
3307 hw->port_info->lport);
3311 * ice_add_vlan_internal - Add one VLAN based filter rule
3312 * @hw: pointer to the hardware structure
3313 * @recp_list: recipe list for which rule has to be added
3314 * @f_entry: filter entry containing one VLAN information
3316 static enum ice_status
3317 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3318 struct ice_fltr_list_entry *f_entry)
3320 struct ice_fltr_mgmt_list_entry *v_list_itr;
3321 struct ice_fltr_info *new_fltr, *cur_fltr;
3322 enum ice_sw_lkup_type lkup_type;
3323 u16 vsi_list_id = 0, vsi_handle;
3324 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3325 enum ice_status status = ICE_SUCCESS;
3327 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3328 return ICE_ERR_PARAM;
3330 f_entry->fltr_info.fwd_id.hw_vsi_id =
3331 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3332 new_fltr = &f_entry->fltr_info;
3334 /* VLAN ID should only be 12 bits */
3335 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3336 return ICE_ERR_PARAM;
3338 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3339 return ICE_ERR_PARAM;
3341 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3342 lkup_type = new_fltr->lkup_type;
3343 vsi_handle = new_fltr->vsi_handle;
3344 rule_lock = &recp_list->filt_rule_lock;
3345 ice_acquire_lock(rule_lock);
3346 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3348 struct ice_vsi_list_map_info *map_info = NULL;
3350 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3351 /* All VLAN pruning rules use a VSI list. Check if
3352 * there is already a VSI list containing VSI that we
3353 * want to add. If found, use the same vsi_list_id for
3354 * this new VLAN rule or else create a new list.
3356 map_info = ice_find_vsi_list_entry(recp_list,
3360 status = ice_create_vsi_list_rule(hw,
3368 /* Convert the action to forwarding to a VSI list. */
3369 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3370 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3373 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3375 v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
3378 status = ICE_ERR_DOES_NOT_EXIST;
3381 /* reuse VSI list for new rule and increment ref_cnt */
3383 v_list_itr->vsi_list_info = map_info;
3384 map_info->ref_cnt++;
3386 v_list_itr->vsi_list_info =
3387 ice_create_vsi_list_map(hw, &vsi_handle,
3391 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3392 /* Update existing VSI list to add new VSI ID only if it used
3395 cur_fltr = &v_list_itr->fltr_info;
3396 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3399 /* If VLAN rule exists and VSI list being used by this rule is
3400 * referenced by more than 1 VLAN rule. Then create a new VSI
3401 * list appending previous VSI with new VSI and update existing
3402 * VLAN rule to point to new VSI list ID
3404 struct ice_fltr_info tmp_fltr;
3405 u16 vsi_handle_arr[2];
3408 /* Current implementation only supports reusing VSI list with
3409 * one VSI count. We should never hit below condition
3411 if (v_list_itr->vsi_count > 1 &&
3412 v_list_itr->vsi_list_info->ref_cnt > 1) {
3413 ice_debug(hw, ICE_DBG_SW,
3414 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3415 status = ICE_ERR_CFG;
3420 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3423 /* A rule already exists with the new VSI being added */
3424 if (cur_handle == vsi_handle) {
3425 status = ICE_ERR_ALREADY_EXISTS;
3429 vsi_handle_arr[0] = cur_handle;
3430 vsi_handle_arr[1] = vsi_handle;
3431 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3432 &vsi_list_id, lkup_type);
3436 tmp_fltr = v_list_itr->fltr_info;
3437 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3438 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3439 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3440 /* Update the previous switch rule to a new VSI list which
3441 * includes current VSI that is requested
3443 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3447 /* before overriding VSI list map info. decrement ref_cnt of
3450 v_list_itr->vsi_list_info->ref_cnt--;
3452 /* now update to newly created list */
3453 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3454 v_list_itr->vsi_list_info =
3455 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3457 v_list_itr->vsi_count++;
3461 ice_release_lock(rule_lock);
3466 * ice_add_vlan_rule - Add VLAN based filter rule
3467 * @hw: pointer to the hardware structure
3468 * @v_list: list of VLAN entries and forwarding information
3469 * @sw: pointer to switch info struct for which function add rule
3471 static enum ice_status
3472 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3473 struct ice_switch_info *sw)
3475 struct ice_fltr_list_entry *v_list_itr;
3476 struct ice_sw_recipe *recp_list;
3478 recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
3479 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3481 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3482 return ICE_ERR_PARAM;
3483 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3484 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
3486 if (v_list_itr->status)
3487 return v_list_itr->status;
3493 * ice_add_vlan - Add a VLAN based filter rule
3494 * @hw: pointer to the hardware structure
3495 * @v_list: list of VLAN and forwarding information
3497 * Function add VLAN rule for logical port from HW struct
3500 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3503 return ICE_ERR_PARAM;
3505 return ice_add_vlan_rule(hw, v_list, hw->switch_info);
3509 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3510 * @hw: pointer to the hardware structure
3511 * @mv_list: list of MAC and VLAN filters
3513 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3514 * pruning bits enabled, then it is the responsibility of the caller to make
3515 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3516 * VLAN won't be received on that VSI otherwise.
3519 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3521 struct ice_fltr_list_entry *mv_list_itr;
3522 struct ice_sw_recipe *recp_list;
3524 if (!mv_list || !hw)
3525 return ICE_ERR_PARAM;
3527 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3528 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3530 enum ice_sw_lkup_type l_type =
3531 mv_list_itr->fltr_info.lkup_type;
3533 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3534 return ICE_ERR_PARAM;
3535 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3536 mv_list_itr->status =
3537 ice_add_rule_internal(hw, recp_list,
3538 hw->port_info->lport,
3540 if (mv_list_itr->status)
3541 return mv_list_itr->status;
3547 * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
3548 * @hw: pointer to the hardware structure
3549 * @em_list: list of ether type MAC filter, MAC is optional
3550 * @sw: pointer to switch info struct for which function add rule
3551 * @lport: logic port number on which function add rule
3553 * This function requires the caller to populate the entries in
3554 * the filter list with the necessary fields (including flags to
3555 * indicate Tx or Rx rules).
3557 static enum ice_status
3558 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3559 struct ice_switch_info *sw, u8 lport)
3561 struct ice_fltr_list_entry *em_list_itr;
3563 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3565 struct ice_sw_recipe *recp_list;
3566 enum ice_sw_lkup_type l_type;
3568 l_type = em_list_itr->fltr_info.lkup_type;
3569 recp_list = &sw->recp_list[l_type];
3571 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3572 l_type != ICE_SW_LKUP_ETHERTYPE)
3573 return ICE_ERR_PARAM;
3575 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
3578 if (em_list_itr->status)
3579 return em_list_itr->status;
3586 * ice_add_eth_mac - Add a ethertype based filter rule
3587 * @hw: pointer to the hardware structure
3588 * @em_list: list of ethertype and forwarding information
3590 * Function add ethertype rule for logical port from HW struct
3592 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3594 if (!em_list || !hw)
3595 return ICE_ERR_PARAM;
3597 return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
3598 hw->port_info->lport);
3602 * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
3603 * @hw: pointer to the hardware structure
3604 * @em_list: list of ethertype or ethertype MAC entries
3605 * @sw: pointer to switch info struct for which function add rule
3607 static enum ice_status
3608 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
3609 struct ice_switch_info *sw)
3611 struct ice_fltr_list_entry *em_list_itr, *tmp;
3613 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3615 struct ice_sw_recipe *recp_list;
3616 enum ice_sw_lkup_type l_type;
3618 l_type = em_list_itr->fltr_info.lkup_type;
3620 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3621 l_type != ICE_SW_LKUP_ETHERTYPE)
3622 return ICE_ERR_PARAM;
3624 recp_list = &sw->recp_list[l_type];
3625 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3627 if (em_list_itr->status)
3628 return em_list_itr->status;
3634 * ice_remove_eth_mac - remove a ethertype based filter rule
3635 * @hw: pointer to the hardware structure
3636 * @em_list: list of ethertype and forwarding information
3640 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3642 if (!em_list || !hw)
3643 return ICE_ERR_PARAM;
3645 return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
3649 * ice_rem_sw_rule_info
3650 * @hw: pointer to the hardware structure
3651 * @rule_head: pointer to the switch list structure that we want to delete
3654 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3656 if (!LIST_EMPTY(rule_head)) {
3657 struct ice_fltr_mgmt_list_entry *entry;
3658 struct ice_fltr_mgmt_list_entry *tmp;
3660 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3661 ice_fltr_mgmt_list_entry, list_entry) {
3662 LIST_DEL(&entry->list_entry);
3663 ice_free(hw, entry);
3669 * ice_rem_adv_rule_info
3670 * @hw: pointer to the hardware structure
3671 * @rule_head: pointer to the switch list structure that we want to delete
3674 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3676 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3677 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3679 if (LIST_EMPTY(rule_head))
3682 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3683 ice_adv_fltr_mgmt_list_entry, list_entry) {
3684 LIST_DEL(&lst_itr->list_entry);
3685 ice_free(hw, lst_itr->lkups);
3686 ice_free(hw, lst_itr);
3691 * ice_rem_all_sw_rules_info
3692 * @hw: pointer to the hardware structure
3694 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3696 struct ice_switch_info *sw = hw->switch_info;
3699 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3700 struct LIST_HEAD_TYPE *rule_head;
3702 rule_head = &sw->recp_list[i].filt_rules;
3703 if (!sw->recp_list[i].adv_rule)
3704 ice_rem_sw_rule_info(hw, rule_head);
3706 ice_rem_adv_rule_info(hw, rule_head);
3711 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3712 * @pi: pointer to the port_info structure
3713 * @vsi_handle: VSI handle to set as default
3714 * @set: true to add the above mentioned switch rule, false to remove it
3715 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3717 * add filter rule to set/unset given VSI as default VSI for the switch
3718 * (represented by swid)
3721 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3724 struct ice_aqc_sw_rules_elem *s_rule;
3725 struct ice_fltr_info f_info;
3726 struct ice_hw *hw = pi->hw;
3727 enum ice_adminq_opc opcode;
3728 enum ice_status status;
3732 if (!ice_is_vsi_valid(hw, vsi_handle))
3733 return ICE_ERR_PARAM;
3734 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3736 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3737 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3738 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3740 return ICE_ERR_NO_MEMORY;
3742 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3744 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3745 f_info.flag = direction;
3746 f_info.fltr_act = ICE_FWD_TO_VSI;
3747 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3749 if (f_info.flag & ICE_FLTR_RX) {
3750 f_info.src = pi->lport;
3751 f_info.src_id = ICE_SRC_ID_LPORT;
3753 f_info.fltr_rule_id =
3754 pi->dflt_rx_vsi_rule_id;
3755 } else if (f_info.flag & ICE_FLTR_TX) {
3756 f_info.src_id = ICE_SRC_ID_VSI;
3757 f_info.src = hw_vsi_id;
3759 f_info.fltr_rule_id =
3760 pi->dflt_tx_vsi_rule_id;
3764 opcode = ice_aqc_opc_add_sw_rules;
3766 opcode = ice_aqc_opc_remove_sw_rules;
3768 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3770 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3771 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3774 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3776 if (f_info.flag & ICE_FLTR_TX) {
3777 pi->dflt_tx_vsi_num = hw_vsi_id;
3778 pi->dflt_tx_vsi_rule_id = index;
3779 } else if (f_info.flag & ICE_FLTR_RX) {
3780 pi->dflt_rx_vsi_num = hw_vsi_id;
3781 pi->dflt_rx_vsi_rule_id = index;
3784 if (f_info.flag & ICE_FLTR_TX) {
3785 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3786 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3787 } else if (f_info.flag & ICE_FLTR_RX) {
3788 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3789 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3794 ice_free(hw, s_rule);
3799 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3800 * @list_head: head of rule list
3801 * @f_info: rule information
3803 * Helper function to search for a unicast rule entry - this is to be used
3804 * to remove unicast MAC filter that is not shared with other VSIs on the
3807 * Returns pointer to entry storing the rule if found
3809 static struct ice_fltr_mgmt_list_entry *
3810 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
3811 struct ice_fltr_info *f_info)
3813 struct ice_fltr_mgmt_list_entry *list_itr;
3815 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3817 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3818 sizeof(f_info->l_data)) &&
3819 f_info->fwd_id.hw_vsi_id ==
3820 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3821 f_info->flag == list_itr->fltr_info.flag)
3828 * ice_remove_mac_rule - remove a MAC based filter rule
3829 * @hw: pointer to the hardware structure
3830 * @m_list: list of MAC addresses and forwarding information
3831 * @recp_list: list from which function remove MAC address
3833 * This function removes either a MAC filter rule or a specific VSI from a
3834 * VSI list for a multicast MAC address.
3836 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3837 * ice_add_mac. Caller should be aware that this call will only work if all
3838 * the entries passed into m_list were added previously. It will not attempt to
3839 * do a partial remove of entries that were found.
3841 static enum ice_status
3842 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3843 struct ice_sw_recipe *recp_list)
3845 struct ice_fltr_list_entry *list_itr, *tmp;
3846 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3849 return ICE_ERR_PARAM;
3851 rule_lock = &recp_list->filt_rule_lock;
3852 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3854 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3855 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3858 if (l_type != ICE_SW_LKUP_MAC)
3859 return ICE_ERR_PARAM;
3861 vsi_handle = list_itr->fltr_info.vsi_handle;
3862 if (!ice_is_vsi_valid(hw, vsi_handle))
3863 return ICE_ERR_PARAM;
3865 list_itr->fltr_info.fwd_id.hw_vsi_id =
3866 ice_get_hw_vsi_num(hw, vsi_handle);
3867 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3868 /* Don't remove the unicast address that belongs to
3869 * another VSI on the switch, since it is not being
3872 ice_acquire_lock(rule_lock);
3873 if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
3874 &list_itr->fltr_info)) {
3875 ice_release_lock(rule_lock);
3876 return ICE_ERR_DOES_NOT_EXIST;
3878 ice_release_lock(rule_lock);
3880 list_itr->status = ice_remove_rule_internal(hw, recp_list,
3882 if (list_itr->status)
3883 return list_itr->status;
3889 * ice_remove_mac - remove a MAC address based filter rule
3890 * @hw: pointer to the hardware structure
3891 * @m_list: list of MAC addresses and forwarding information
3895 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3897 struct ice_sw_recipe *recp_list;
3899 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
3900 return ice_remove_mac_rule(hw, m_list, recp_list);
3904 * ice_remove_vlan_rule - Remove VLAN based filter rule
3905 * @hw: pointer to the hardware structure
3906 * @v_list: list of VLAN entries and forwarding information
3907 * @recp_list: list from which function remove VLAN
3909 static enum ice_status
3910 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
3911 struct ice_sw_recipe *recp_list)
3913 struct ice_fltr_list_entry *v_list_itr, *tmp;
3915 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3917 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3919 if (l_type != ICE_SW_LKUP_VLAN)
3920 return ICE_ERR_PARAM;
3921 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
3923 if (v_list_itr->status)
3924 return v_list_itr->status;
3930 * ice_remove_vlan - remove a VLAN address based filter rule
3931 * @hw: pointer to the hardware structure
3932 * @v_list: list of VLAN and forwarding information
3936 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3938 struct ice_sw_recipe *recp_list;
3941 return ICE_ERR_PARAM;
3943 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
3944 return ice_remove_vlan_rule(hw, v_list, recp_list);
3948 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3949 * @hw: pointer to the hardware structure
3950 * @v_list: list of MAC VLAN entries and forwarding information
3953 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3955 struct ice_fltr_list_entry *v_list_itr, *tmp;
3956 struct ice_sw_recipe *recp_list;
3959 return ICE_ERR_PARAM;
3961 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
3962 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3964 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3966 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3967 return ICE_ERR_PARAM;
3968 v_list_itr->status =
3969 ice_remove_rule_internal(hw, recp_list,
3971 if (v_list_itr->status)
3972 return v_list_itr->status;
3978 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3979 * @fm_entry: filter entry to inspect
3980 * @vsi_handle: VSI handle to compare with filter info
3983 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3985 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3986 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3987 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3988 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3993 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3994 * @hw: pointer to the hardware structure
3995 * @vsi_handle: VSI handle to remove filters from
3996 * @vsi_list_head: pointer to the list to add entry to
3997 * @fi: pointer to fltr_info of filter entry to copy & add
3999 * Helper function, used when creating a list of filters to remove from
4000 * a specific VSI. The entry added to vsi_list_head is a COPY of the
4001 * original filter entry, with the exception of fltr_info.fltr_act and
4002 * fltr_info.fwd_id fields. These are set such that later logic can
4003 * extract which VSI to remove the fltr from, and pass on that information.
4005 static enum ice_status
4006 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4007 struct LIST_HEAD_TYPE *vsi_list_head,
4008 struct ice_fltr_info *fi)
4010 struct ice_fltr_list_entry *tmp;
4012 /* this memory is freed up in the caller function
4013 * once filters for this VSI are removed
4015 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4017 return ICE_ERR_NO_MEMORY;
4019 tmp->fltr_info = *fi;
4021 /* Overwrite these fields to indicate which VSI to remove filter from,
4022 * so find and remove logic can extract the information from the
4023 * list entries. Note that original entries will still have proper
4026 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4027 tmp->fltr_info.vsi_handle = vsi_handle;
4028 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4030 LIST_ADD(&tmp->list_entry, vsi_list_head);
4036 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4037 * @hw: pointer to the hardware structure
4038 * @vsi_handle: VSI handle to remove filters from
4039 * @lkup_list_head: pointer to the list that has certain lookup type filters
4040 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4042 * Locates all filters in lkup_list_head that are used by the given VSI,
4043 * and adds COPIES of those entries to vsi_list_head (intended to be used
4044 * to remove the listed filters).
4045 * Note that this means all entries in vsi_list_head must be explicitly
4046 * deallocated by the caller when done with list.
4048 static enum ice_status
4049 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4050 struct LIST_HEAD_TYPE *lkup_list_head,
4051 struct LIST_HEAD_TYPE *vsi_list_head)
4053 struct ice_fltr_mgmt_list_entry *fm_entry;
4054 enum ice_status status = ICE_SUCCESS;
4056 /* check to make sure VSI ID is valid and within boundary */
4057 if (!ice_is_vsi_valid(hw, vsi_handle))
4058 return ICE_ERR_PARAM;
4060 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4061 ice_fltr_mgmt_list_entry, list_entry) {
4062 struct ice_fltr_info *fi;
4064 fi = &fm_entry->fltr_info;
4065 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4068 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4077 * ice_determine_promisc_mask
4078 * @fi: filter info to parse
4080 * Helper function to determine which ICE_PROMISC_ mask corresponds
4081 * to given filter into.
4083 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4085 u16 vid = fi->l_data.mac_vlan.vlan_id;
4086 u8 *macaddr = fi->l_data.mac.mac_addr;
4087 bool is_tx_fltr = false;
4088 u8 promisc_mask = 0;
4090 if (fi->flag == ICE_FLTR_TX)
4093 if (IS_BROADCAST_ETHER_ADDR(macaddr))
4094 promisc_mask |= is_tx_fltr ?
4095 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4096 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4097 promisc_mask |= is_tx_fltr ?
4098 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4099 else if (IS_UNICAST_ETHER_ADDR(macaddr))
4100 promisc_mask |= is_tx_fltr ?
4101 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4103 promisc_mask |= is_tx_fltr ?
4104 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4106 return promisc_mask;
4110 * ice_get_vsi_promisc - get promiscuous mode of given VSI
4111 * @hw: pointer to the hardware structure
4112 * @vsi_handle: VSI handle to retrieve info from
4113 * @promisc_mask: pointer to mask to be filled in
4114 * @vid: VLAN ID of promisc VLAN VSI
4117 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4120 struct ice_switch_info *sw = hw->switch_info;
4121 struct ice_fltr_mgmt_list_entry *itr;
4122 struct LIST_HEAD_TYPE *rule_head;
4123 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4125 if (!ice_is_vsi_valid(hw, vsi_handle))
4126 return ICE_ERR_PARAM;
4130 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4131 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4133 ice_acquire_lock(rule_lock);
4134 LIST_FOR_EACH_ENTRY(itr, rule_head,
4135 ice_fltr_mgmt_list_entry, list_entry) {
4136 /* Continue if this filter doesn't apply to this VSI or the
4137 * VSI ID is not in the VSI map for this filter
4139 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4142 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4144 ice_release_lock(rule_lock);
4150 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4151 * @hw: pointer to the hardware structure
4152 * @vsi_handle: VSI handle to retrieve info from
4153 * @promisc_mask: pointer to mask to be filled in
4154 * @vid: VLAN ID of promisc VLAN VSI
4157 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4160 struct ice_switch_info *sw = hw->switch_info;
4161 struct ice_fltr_mgmt_list_entry *itr;
4162 struct LIST_HEAD_TYPE *rule_head;
4163 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4165 if (!ice_is_vsi_valid(hw, vsi_handle))
4166 return ICE_ERR_PARAM;
4170 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4171 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4173 ice_acquire_lock(rule_lock);
4174 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4176 /* Continue if this filter doesn't apply to this VSI or the
4177 * VSI ID is not in the VSI map for this filter
4179 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4182 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4184 ice_release_lock(rule_lock);
4190 * ice_remove_promisc - Remove promisc based filter rules
4191 * @hw: pointer to the hardware structure
4192 * @recp_id: recipe ID for which the rule needs to removed
4193 * @v_list: list of promisc entries
4195 static enum ice_status
4196 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4197 struct LIST_HEAD_TYPE *v_list)
4199 struct ice_fltr_list_entry *v_list_itr, *tmp;
4200 struct ice_sw_recipe *recp_list;
4202 recp_list = &hw->switch_info->recp_list[recp_id];
4203 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4205 v_list_itr->status =
4206 ice_remove_rule_internal(hw, recp_list, v_list_itr);
4207 if (v_list_itr->status)
4208 return v_list_itr->status;
4214 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4215 * @hw: pointer to the hardware structure
4216 * @vsi_handle: VSI handle to clear mode
4217 * @promisc_mask: mask of promiscuous config bits to clear
4218 * @vid: VLAN ID to clear VLAN promiscuous
4221 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4224 struct ice_switch_info *sw = hw->switch_info;
4225 struct ice_fltr_list_entry *fm_entry, *tmp;
4226 struct LIST_HEAD_TYPE remove_list_head;
4227 struct ice_fltr_mgmt_list_entry *itr;
4228 struct LIST_HEAD_TYPE *rule_head;
4229 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4230 enum ice_status status = ICE_SUCCESS;
4233 if (!ice_is_vsi_valid(hw, vsi_handle))
4234 return ICE_ERR_PARAM;
4236 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4237 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4239 recipe_id = ICE_SW_LKUP_PROMISC;
4241 rule_head = &sw->recp_list[recipe_id].filt_rules;
4242 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4244 INIT_LIST_HEAD(&remove_list_head);
4246 ice_acquire_lock(rule_lock);
4247 LIST_FOR_EACH_ENTRY(itr, rule_head,
4248 ice_fltr_mgmt_list_entry, list_entry) {
4249 struct ice_fltr_info *fltr_info;
4250 u8 fltr_promisc_mask = 0;
4252 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4254 fltr_info = &itr->fltr_info;
4256 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4257 vid != fltr_info->l_data.mac_vlan.vlan_id)
4260 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4262 /* Skip if filter is not completely specified by given mask */
4263 if (fltr_promisc_mask & ~promisc_mask)
4266 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4270 ice_release_lock(rule_lock);
4271 goto free_fltr_list;
4274 ice_release_lock(rule_lock);
4276 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4279 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4280 ice_fltr_list_entry, list_entry) {
4281 LIST_DEL(&fm_entry->list_entry);
4282 ice_free(hw, fm_entry);
4289 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4290 * @hw: pointer to the hardware structure
4291 * @vsi_handle: VSI handle to configure
4292 * @promisc_mask: mask of promiscuous config bits
4293 * @vid: VLAN ID to set VLAN promiscuous
4296 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4298 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4299 struct ice_fltr_list_entry f_list_entry;
4300 struct ice_fltr_info new_fltr;
4301 enum ice_status status = ICE_SUCCESS;
4307 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4309 if (!ice_is_vsi_valid(hw, vsi_handle))
4310 return ICE_ERR_PARAM;
4311 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4313 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4315 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4316 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4317 new_fltr.l_data.mac_vlan.vlan_id = vid;
4318 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4320 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4321 recipe_id = ICE_SW_LKUP_PROMISC;
4324 /* Separate filters must be set for each direction/packet type
4325 * combination, so we will loop over the mask value, store the
4326 * individual type, and clear it out in the input mask as it
4329 while (promisc_mask) {
4330 struct ice_sw_recipe *recp_list;
4336 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4337 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4338 pkt_type = UCAST_FLTR;
4339 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4340 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4341 pkt_type = UCAST_FLTR;
4343 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4344 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4345 pkt_type = MCAST_FLTR;
4346 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4347 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4348 pkt_type = MCAST_FLTR;
4350 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4351 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4352 pkt_type = BCAST_FLTR;
4353 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4354 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4355 pkt_type = BCAST_FLTR;
4359 /* Check for VLAN promiscuous flag */
4360 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4361 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4362 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4363 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4367 /* Set filter DA based on packet type */
4368 mac_addr = new_fltr.l_data.mac.mac_addr;
4369 if (pkt_type == BCAST_FLTR) {
4370 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4371 } else if (pkt_type == MCAST_FLTR ||
4372 pkt_type == UCAST_FLTR) {
4373 /* Use the dummy ether header DA */
4374 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4375 ICE_NONDMA_TO_NONDMA);
4376 if (pkt_type == MCAST_FLTR)
4377 mac_addr[0] |= 0x1; /* Set multicast bit */
4380 /* Need to reset this to zero for all iterations */
4383 new_fltr.flag |= ICE_FLTR_TX;
4384 new_fltr.src = hw_vsi_id;
4386 new_fltr.flag |= ICE_FLTR_RX;
4387 new_fltr.src = hw->port_info->lport;
4390 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4391 new_fltr.vsi_handle = vsi_handle;
4392 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4393 f_list_entry.fltr_info = new_fltr;
4394 recp_list = &hw->switch_info->recp_list[recipe_id];
4396 status = ice_add_rule_internal(hw, recp_list,
4397 hw->port_info->lport,
4399 if (status != ICE_SUCCESS)
4400 goto set_promisc_exit;
4408 * ice_set_vlan_vsi_promisc
4409 * @hw: pointer to the hardware structure
4410 * @vsi_handle: VSI handle to configure
4411 * @promisc_mask: mask of promiscuous config bits
4412 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4414 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4417 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4418 bool rm_vlan_promisc)
4420 struct ice_switch_info *sw = hw->switch_info;
4421 struct ice_fltr_list_entry *list_itr, *tmp;
4422 struct LIST_HEAD_TYPE vsi_list_head;
4423 struct LIST_HEAD_TYPE *vlan_head;
4424 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4425 enum ice_status status;
4428 INIT_LIST_HEAD(&vsi_list_head);
4429 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4430 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4431 ice_acquire_lock(vlan_lock);
4432 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4434 ice_release_lock(vlan_lock);
4436 goto free_fltr_list;
4438 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4440 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4441 if (rm_vlan_promisc)
4442 status = ice_clear_vsi_promisc(hw, vsi_handle,
4443 promisc_mask, vlan_id);
4445 status = ice_set_vsi_promisc(hw, vsi_handle,
4446 promisc_mask, vlan_id);
4452 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4453 ice_fltr_list_entry, list_entry) {
4454 LIST_DEL(&list_itr->list_entry);
4455 ice_free(hw, list_itr);
4461 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4462 * @hw: pointer to the hardware structure
4463 * @vsi_handle: VSI handle to remove filters from
4464 * @recp_list: recipe list from which function remove fltr
4465 * @lkup: switch rule filter lookup type
4468 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4469 struct ice_sw_recipe *recp_list,
4470 enum ice_sw_lkup_type lkup)
4472 struct ice_fltr_list_entry *fm_entry;
4473 struct LIST_HEAD_TYPE remove_list_head;
4474 struct LIST_HEAD_TYPE *rule_head;
4475 struct ice_fltr_list_entry *tmp;
4476 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4477 enum ice_status status;
4479 INIT_LIST_HEAD(&remove_list_head);
4480 rule_lock = &recp_list[lkup].filt_rule_lock;
4481 rule_head = &recp_list[lkup].filt_rules;
4482 ice_acquire_lock(rule_lock);
4483 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4485 ice_release_lock(rule_lock);
4490 case ICE_SW_LKUP_MAC:
4491 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
4493 case ICE_SW_LKUP_VLAN:
4494 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
4496 case ICE_SW_LKUP_PROMISC:
4497 case ICE_SW_LKUP_PROMISC_VLAN:
4498 ice_remove_promisc(hw, lkup, &remove_list_head);
4500 case ICE_SW_LKUP_MAC_VLAN:
4501 ice_remove_mac_vlan(hw, &remove_list_head);
4503 case ICE_SW_LKUP_ETHERTYPE:
4504 case ICE_SW_LKUP_ETHERTYPE_MAC:
4505 ice_remove_eth_mac(hw, &remove_list_head);
4507 case ICE_SW_LKUP_DFLT:
4508 ice_debug(hw, ICE_DBG_SW,
4509 "Remove filters for this lookup type hasn't been implemented yet\n");
4511 case ICE_SW_LKUP_LAST:
4512 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4516 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4517 ice_fltr_list_entry, list_entry) {
4518 LIST_DEL(&fm_entry->list_entry);
4519 ice_free(hw, fm_entry);
4524 * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
4525 * @hw: pointer to the hardware structure
4526 * @vsi_handle: VSI handle to remove filters from
4527 * @sw: pointer to switch info struct
4530 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
4531 struct ice_switch_info *sw)
4533 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4535 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4536 sw->recp_list, ICE_SW_LKUP_MAC);
4537 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4538 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
4539 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4540 sw->recp_list, ICE_SW_LKUP_PROMISC);
4541 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4542 sw->recp_list, ICE_SW_LKUP_VLAN);
4543 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4544 sw->recp_list, ICE_SW_LKUP_DFLT);
4545 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4546 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
4547 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4548 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
4549 ice_remove_vsi_lkup_fltr(hw, vsi_handle,
4550 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
4554 * ice_remove_vsi_fltr - Remove all filters for a VSI
4555 * @hw: pointer to the hardware structure
4556 * @vsi_handle: VSI handle to remove filters from
4558 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4560 ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
4564 * ice_alloc_res_cntr - allocating resource counter
4565 * @hw: pointer to the hardware structure
4566 * @type: type of resource
4567 * @alloc_shared: if set it is shared else dedicated
4568 * @num_items: number of entries requested for FD resource type
4569 * @counter_id: counter index returned by AQ call
4572 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4575 struct ice_aqc_alloc_free_res_elem *buf;
4576 enum ice_status status;
4579 /* Allocate resource */
4580 buf_len = sizeof(*buf);
4581 buf = (struct ice_aqc_alloc_free_res_elem *)
4582 ice_malloc(hw, buf_len);
4584 return ICE_ERR_NO_MEMORY;
4586 buf->num_elems = CPU_TO_LE16(num_items);
4587 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4588 ICE_AQC_RES_TYPE_M) | alloc_shared);
4590 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4591 ice_aqc_opc_alloc_res, NULL);
4595 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4603 * ice_free_res_cntr - free resource counter
4604 * @hw: pointer to the hardware structure
4605 * @type: type of resource
4606 * @alloc_shared: if set it is shared else dedicated
4607 * @num_items: number of entries to be freed for FD resource type
4608 * @counter_id: counter ID resource which needs to be freed
4611 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4614 struct ice_aqc_alloc_free_res_elem *buf;
4615 enum ice_status status;
4619 buf_len = sizeof(*buf);
4620 buf = (struct ice_aqc_alloc_free_res_elem *)
4621 ice_malloc(hw, buf_len);
4623 return ICE_ERR_NO_MEMORY;
4625 buf->num_elems = CPU_TO_LE16(num_items);
4626 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4627 ICE_AQC_RES_TYPE_M) | alloc_shared);
4628 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4630 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4631 ice_aqc_opc_free_res, NULL);
4633 ice_debug(hw, ICE_DBG_SW,
4634 "counter resource could not be freed\n");
4641 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4642 * @hw: pointer to the hardware structure
4643 * @counter_id: returns counter index
4645 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4647 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4648 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4653 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4654 * @hw: pointer to the hardware structure
4655 * @counter_id: counter index to be freed
4657 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4659 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4660 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4665 * ice_alloc_res_lg_act - add large action resource
4666 * @hw: pointer to the hardware structure
4667 * @l_id: large action ID to fill it in
4668 * @num_acts: number of actions to hold with a large action entry
4670 static enum ice_status
4671 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4673 struct ice_aqc_alloc_free_res_elem *sw_buf;
4674 enum ice_status status;
4677 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4678 return ICE_ERR_PARAM;
4680 /* Allocate resource for large action */
4681 buf_len = sizeof(*sw_buf);
4682 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4683 ice_malloc(hw, buf_len);
4685 return ICE_ERR_NO_MEMORY;
4687 sw_buf->num_elems = CPU_TO_LE16(1);
4689 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4690 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4691 * If num_acts is greater than 2, then use
4692 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4693 * The num_acts cannot exceed 4. This was ensured at the
4694 * beginning of the function.
4697 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4698 else if (num_acts == 2)
4699 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4701 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4703 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4704 ice_aqc_opc_alloc_res, NULL);
4706 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4708 ice_free(hw, sw_buf);
4713 * ice_add_mac_with_sw_marker - add filter with sw marker
4714 * @hw: pointer to the hardware structure
4715 * @f_info: filter info structure containing the MAC filter information
4716 * @sw_marker: sw marker to tag the Rx descriptor with
4719 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4722 struct ice_fltr_mgmt_list_entry *m_entry;
4723 struct ice_fltr_list_entry fl_info;
4724 struct ice_sw_recipe *recp_list;
4725 struct LIST_HEAD_TYPE l_head;
4726 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4727 enum ice_status ret;
4731 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4732 return ICE_ERR_PARAM;
4734 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4735 return ICE_ERR_PARAM;
4737 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4738 return ICE_ERR_PARAM;
4740 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4741 return ICE_ERR_PARAM;
4742 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4744 /* Add filter if it doesn't exist so then the adding of large
4745 * action always results in update
4748 INIT_LIST_HEAD(&l_head);
4749 fl_info.fltr_info = *f_info;
4750 LIST_ADD(&fl_info.list_entry, &l_head);
4752 entry_exists = false;
4753 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4754 hw->port_info->lport);
4755 if (ret == ICE_ERR_ALREADY_EXISTS)
4756 entry_exists = true;
4760 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4761 rule_lock = &recp_list->filt_rule_lock;
4762 ice_acquire_lock(rule_lock);
4763 /* Get the book keeping entry for the filter */
4764 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4768 /* If counter action was enabled for this rule then don't enable
4769 * sw marker large action
4771 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4772 ret = ICE_ERR_PARAM;
4776 /* if same marker was added before */
4777 if (m_entry->sw_marker_id == sw_marker) {
4778 ret = ICE_ERR_ALREADY_EXISTS;
4782 /* Allocate a hardware table entry to hold large act. Three actions
4783 * for marker based large action
4785 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4789 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4792 /* Update the switch rule to add the marker action */
4793 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4795 ice_release_lock(rule_lock);
4800 ice_release_lock(rule_lock);
4801 /* only remove entry if it did not exist previously */
4803 ret = ice_remove_mac(hw, &l_head);
4809 * ice_add_mac_with_counter - add filter with counter enabled
4810 * @hw: pointer to the hardware structure
4811 * @f_info: pointer to filter info structure containing the MAC filter
4815 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4817 struct ice_fltr_mgmt_list_entry *m_entry;
4818 struct ice_fltr_list_entry fl_info;
4819 struct ice_sw_recipe *recp_list;
4820 struct LIST_HEAD_TYPE l_head;
4821 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4822 enum ice_status ret;
4827 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4828 return ICE_ERR_PARAM;
4830 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4831 return ICE_ERR_PARAM;
4833 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4834 return ICE_ERR_PARAM;
4835 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4836 recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4838 entry_exist = false;
4840 rule_lock = &recp_list->filt_rule_lock;
4842 /* Add filter if it doesn't exist so then the adding of large
4843 * action always results in update
4845 INIT_LIST_HEAD(&l_head);
4847 fl_info.fltr_info = *f_info;
4848 LIST_ADD(&fl_info.list_entry, &l_head);
4850 ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
4851 hw->port_info->lport);
4852 if (ret == ICE_ERR_ALREADY_EXISTS)
4857 ice_acquire_lock(rule_lock);
4858 m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
4860 ret = ICE_ERR_BAD_PTR;
4864 /* Don't enable counter for a filter for which sw marker was enabled */
4865 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4866 ret = ICE_ERR_PARAM;
4870 /* If a counter was already enabled then don't need to add again */
4871 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4872 ret = ICE_ERR_ALREADY_EXISTS;
4876 /* Allocate a hardware table entry to VLAN counter */
4877 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4881 /* Allocate a hardware table entry to hold large act. Two actions for
4882 * counter based large action
4884 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4888 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4891 /* Update the switch rule to add the counter action */
4892 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4894 ice_release_lock(rule_lock);
4899 ice_release_lock(rule_lock);
4900 /* only remove entry if it did not exist previously */
4902 ret = ice_remove_mac(hw, &l_head);
4907 /* This is mapping table entry that maps every word within a given protocol
4908 * structure to the real byte offset as per the specification of that
4910 * for example dst address is 3 words in ethertype header and corresponding
4911 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4912 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4913 * matching entry describing its field. This needs to be updated if new
4914 * structure is added to that union.
4916 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4917 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4918 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4919 { ICE_ETYPE_OL, { 0 } },
4920 { ICE_VLAN_OFOS, { 0, 2 } },
4921 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4922 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4923 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4924 26, 28, 30, 32, 34, 36, 38 } },
4925 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4926 26, 28, 30, 32, 34, 36, 38 } },
4927 { ICE_TCP_IL, { 0, 2 } },
4928 { ICE_UDP_OF, { 0, 2 } },
4929 { ICE_UDP_ILOS, { 0, 2 } },
4930 { ICE_SCTP_IL, { 0, 2 } },
4931 { ICE_VXLAN, { 8, 10, 12, 14 } },
4932 { ICE_GENEVE, { 8, 10, 12, 14 } },
4933 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4934 { ICE_NVGRE, { 0, 2, 4, 6 } },
4935 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4936 { ICE_PPPOE, { 0, 2, 4, 6 } },
4939 /* The following table describes preferred grouping of recipes.
4940 * If a recipe that needs to be programmed is a superset or matches one of the
4941 * following combinations, then the recipe needs to be chained as per the
4945 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4946 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4947 { ICE_MAC_IL, ICE_MAC_IL_HW },
4948 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4949 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4950 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4951 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4952 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4953 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4954 { ICE_TCP_IL, ICE_TCP_IL_HW },
4955 { ICE_UDP_OF, ICE_UDP_OF_HW },
4956 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4957 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4958 { ICE_VXLAN, ICE_UDP_OF_HW },
4959 { ICE_GENEVE, ICE_UDP_OF_HW },
4960 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4961 { ICE_NVGRE, ICE_GRE_OF_HW },
4962 { ICE_GTP, ICE_UDP_OF_HW },
4963 { ICE_PPPOE, ICE_PPPOE_HW },
4967 * ice_find_recp - find a recipe
4968 * @hw: pointer to the hardware structure
4969 * @lkup_exts: extension sequence to match
4971 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4973 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4975 bool refresh_required = true;
4976 struct ice_sw_recipe *recp;
4979 /* Walk through existing recipes to find a match */
4980 recp = hw->switch_info->recp_list;
4981 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4982 /* If recipe was not created for this ID, in SW bookkeeping,
4983 * check if FW has an entry for this recipe. If the FW has an
4984 * entry update it in our SW bookkeeping and continue with the
4987 if (!recp[i].recp_created)
4988 if (ice_get_recp_frm_fw(hw,
4989 hw->switch_info->recp_list, i,
4993 /* Skip inverse action recipes */
4994 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4995 ICE_AQ_RECIPE_ACT_INV_ACT)
4998 /* if number of words we are looking for match */
4999 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5000 struct ice_fv_word *a = lkup_exts->fv_words;
5001 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
5005 for (p = 0; p < lkup_exts->n_val_words; p++) {
5006 for (q = 0; q < recp[i].lkup_exts.n_val_words;
5008 if (a[p].off == b[q].off &&
5009 a[p].prot_id == b[q].prot_id)
5010 /* Found the "p"th word in the
5015 /* After walking through all the words in the
5016 * "i"th recipe if "p"th word was not found then
5017 * this recipe is not what we are looking for.
5018 * So break out from this loop and try the next
5021 if (q >= recp[i].lkup_exts.n_val_words) {
5026 /* If for "i"th recipe the found was never set to false
5027 * then it means we found our match
5030 return i; /* Return the recipe ID */
5033 return ICE_MAX_NUM_RECIPES;
5037 * ice_prot_type_to_id - get protocol ID from protocol type
5038 * @type: protocol type
5039 * @id: pointer to variable that will receive the ID
5041 * Returns true if found, false otherwise
5043 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5047 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5048 if (ice_prot_id_tbl[i].type == type) {
5049 *id = ice_prot_id_tbl[i].protocol_id;
5056 * ice_find_valid_words - count valid words
5057 * @rule: advanced rule with lookup information
5058 * @lkup_exts: byte offset extractions of the words that are valid
5060 * calculate valid words in a lookup rule using mask value
5063 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5064 struct ice_prot_lkup_ext *lkup_exts)
5066 u8 j, word, prot_id, ret_val;
5068 if (!ice_prot_type_to_id(rule->type, &prot_id))
5071 word = lkup_exts->n_val_words;
5073 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5074 if (((u16 *)&rule->m_u)[j] &&
5075 rule->type < ARRAY_SIZE(ice_prot_ext)) {
5076 /* No more space to accommodate */
5077 if (word >= ICE_MAX_CHAIN_WORDS)
5079 lkup_exts->fv_words[word].off =
5080 ice_prot_ext[rule->type].offs[j];
5081 lkup_exts->fv_words[word].prot_id =
5082 ice_prot_id_tbl[rule->type].protocol_id;
5083 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
5087 ret_val = word - lkup_exts->n_val_words;
5088 lkup_exts->n_val_words = word;
5094 * ice_create_first_fit_recp_def - Create a recipe grouping
5095 * @hw: pointer to the hardware structure
5096 * @lkup_exts: an array of protocol header extractions
5097 * @rg_list: pointer to a list that stores new recipe groups
5098 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
5100 * Using first fit algorithm, take all the words that are still not done
5101 * and start grouping them in 4-word groups. Each group makes up one
5104 static enum ice_status
5105 ice_create_first_fit_recp_def(struct ice_hw *hw,
5106 struct ice_prot_lkup_ext *lkup_exts,
5107 struct LIST_HEAD_TYPE *rg_list,
5110 struct ice_pref_recipe_group *grp = NULL;
5115 /* Walk through every word in the rule to check if it is not done. If so
5116 * then this word needs to be part of a new recipe.
5118 for (j = 0; j < lkup_exts->n_val_words; j++)
5119 if (!ice_is_bit_set(lkup_exts->done, j)) {
5121 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
5122 struct ice_recp_grp_entry *entry;
5124 entry = (struct ice_recp_grp_entry *)
5125 ice_malloc(hw, sizeof(*entry));
5127 return ICE_ERR_NO_MEMORY;
5128 LIST_ADD(&entry->l_entry, rg_list);
5129 grp = &entry->r_group;
5133 grp->pairs[grp->n_val_pairs].prot_id =
5134 lkup_exts->fv_words[j].prot_id;
5135 grp->pairs[grp->n_val_pairs].off =
5136 lkup_exts->fv_words[j].off;
5137 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
5145 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
5146 * @hw: pointer to the hardware structure
5147 * @fv_list: field vector with the extraction sequence information
5148 * @rg_list: recipe groupings with protocol-offset pairs
5150 * Helper function to fill in the field vector indices for protocol-offset
5151 * pairs. These indexes are then ultimately programmed into a recipe.
5153 static enum ice_status
5154 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5155 struct LIST_HEAD_TYPE *rg_list)
5157 struct ice_sw_fv_list_entry *fv;
5158 struct ice_recp_grp_entry *rg;
5159 struct ice_fv_word *fv_ext;
5161 if (LIST_EMPTY(fv_list))
5164 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5165 fv_ext = fv->fv_ptr->ew;
5167 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5170 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5171 struct ice_fv_word *pr;
5176 pr = &rg->r_group.pairs[i];
5177 mask = rg->r_group.mask[i];
5179 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5180 if (fv_ext[j].prot_id == pr->prot_id &&
5181 fv_ext[j].off == pr->off) {
5184 /* Store index of field vector */
5186 /* Mask is given by caller as big
5187 * endian, but sent to FW as little
5190 rg->fv_mask[i] = mask << 8 | mask >> 8;
5194 /* Protocol/offset could not be found, caller gave an
5198 return ICE_ERR_PARAM;
5206 * ice_find_free_recp_res_idx - find free result indexes for recipe
5207 * @hw: pointer to hardware structure
5208 * @profiles: bitmap of profiles that will be associated with the new recipe
5209 * @free_idx: pointer to variable to receive the free index bitmap
5211 * The algorithm used here is:
5212 * 1. When creating a new recipe, create a set P which contains all
5213 * Profiles that will be associated with our new recipe
5215 * 2. For each Profile p in set P:
5216 * a. Add all recipes associated with Profile p into set R
5217 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5218 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5219 * i. Or just assume they all have the same possible indexes:
5221 * i.e., PossibleIndexes = 0x0000F00000000000
5223 * 3. For each Recipe r in set R:
5224 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5225 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5227 * FreeIndexes will contain the bits indicating the indexes free for use,
5228 * then the code needs to update the recipe[r].used_result_idx_bits to
5229 * indicate which indexes were selected for use by this recipe.
5232 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5233 ice_bitmap_t *free_idx)
5235 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5236 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5237 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
5241 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5242 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5243 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5244 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5246 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5247 ice_set_bit(count, possible_idx);
5249 /* For each profile we are going to associate the recipe with, add the
5250 * recipes that are associated with that profile. This will give us
5251 * the set of recipes that our recipe may collide with. Also, determine
5252 * what possible result indexes are usable given this set of profiles.
5255 while (ICE_MAX_NUM_PROFILES >
5256 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5257 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5258 ICE_MAX_NUM_RECIPES);
5259 ice_and_bitmap(possible_idx, possible_idx,
5260 hw->switch_info->prof_res_bm[bit],
5265 /* For each recipe that our new recipe may collide with, determine
5266 * which indexes have been used.
5268 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5269 if (ice_is_bit_set(recipes, bit)) {
5270 ice_or_bitmap(used_idx, used_idx,
5271 hw->switch_info->recp_list[bit].res_idxs,
5275 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5277 /* return number of free indexes */
5280 while (ICE_MAX_FV_WORDS >
5281 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5290 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5291 * @hw: pointer to hardware structure
5292 * @rm: recipe management list entry
5293 * @match_tun: if field vector index for tunnel needs to be programmed
5294 * @profiles: bitmap of profiles that will be assocated.
5296 static enum ice_status
5297 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5298 bool match_tun, ice_bitmap_t *profiles)
5300 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5301 struct ice_aqc_recipe_data_elem *tmp;
5302 struct ice_aqc_recipe_data_elem *buf;
5303 struct ice_recp_grp_entry *entry;
5304 enum ice_status status;
5310 /* When more than one recipe are required, another recipe is needed to
5311 * chain them together. Matching a tunnel metadata ID takes up one of
5312 * the match fields in the chaining recipe reducing the number of
5313 * chained recipes by one.
5315 /* check number of free result indices */
5316 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5317 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5319 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5320 free_res_idx, rm->n_grp_count);
5322 if (rm->n_grp_count > 1) {
5323 if (rm->n_grp_count > free_res_idx)
5324 return ICE_ERR_MAX_LIMIT;
5329 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5330 ICE_MAX_NUM_RECIPES,
5333 return ICE_ERR_NO_MEMORY;
5335 buf = (struct ice_aqc_recipe_data_elem *)
5336 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5338 status = ICE_ERR_NO_MEMORY;
5342 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5343 recipe_count = ICE_MAX_NUM_RECIPES;
5344 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5346 if (status || recipe_count == 0)
5349 /* Allocate the recipe resources, and configure them according to the
5350 * match fields from protocol headers and extracted field vectors.
5352 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5353 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5356 status = ice_alloc_recipe(hw, &entry->rid);
5360 /* Clear the result index of the located recipe, as this will be
5361 * updated, if needed, later in the recipe creation process.
5363 tmp[0].content.result_indx = 0;
5365 buf[recps] = tmp[0];
5366 buf[recps].recipe_indx = (u8)entry->rid;
5367 /* if the recipe is a non-root recipe RID should be programmed
5368 * as 0 for the rules to be applied correctly.
5370 buf[recps].content.rid = 0;
5371 ice_memset(&buf[recps].content.lkup_indx, 0,
5372 sizeof(buf[recps].content.lkup_indx),
5375 /* All recipes use look-up index 0 to match switch ID. */
5376 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5377 buf[recps].content.mask[0] =
5378 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5379 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5382 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5383 buf[recps].content.lkup_indx[i] = 0x80;
5384 buf[recps].content.mask[i] = 0;
5387 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5388 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5389 buf[recps].content.mask[i + 1] =
5390 CPU_TO_LE16(entry->fv_mask[i]);
5393 if (rm->n_grp_count > 1) {
5394 /* Checks to see if there really is a valid result index
5397 if (chain_idx >= ICE_MAX_FV_WORDS) {
5398 ice_debug(hw, ICE_DBG_SW,
5399 "No chain index available\n");
5400 status = ICE_ERR_MAX_LIMIT;
5404 entry->chain_idx = chain_idx;
5405 buf[recps].content.result_indx =
5406 ICE_AQ_RECIPE_RESULT_EN |
5407 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5408 ICE_AQ_RECIPE_RESULT_DATA_M);
5409 ice_clear_bit(chain_idx, result_idx_bm);
5410 chain_idx = ice_find_first_bit(result_idx_bm,
5414 /* fill recipe dependencies */
5415 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5416 ICE_MAX_NUM_RECIPES);
5417 ice_set_bit(buf[recps].recipe_indx,
5418 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5419 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5423 if (rm->n_grp_count == 1) {
5424 rm->root_rid = buf[0].recipe_indx;
5425 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5426 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5427 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5428 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5429 sizeof(buf[0].recipe_bitmap),
5430 ICE_NONDMA_TO_NONDMA);
5432 status = ICE_ERR_BAD_PTR;
5435 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5436 * the recipe which is getting created if specified
5437 * by user. Usually any advanced switch filter, which results
5438 * into new extraction sequence, ended up creating a new recipe
5439 * of type ROOT and usually recipes are associated with profiles
5440 * Switch rule referreing newly created recipe, needs to have
5441 * either/or 'fwd' or 'join' priority, otherwise switch rule
5442 * evaluation will not happen correctly. In other words, if
5443 * switch rule to be evaluated on priority basis, then recipe
5444 * needs to have priority, otherwise it will be evaluated last.
5446 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5448 struct ice_recp_grp_entry *last_chain_entry;
5451 /* Allocate the last recipe that will chain the outcomes of the
5452 * other recipes together
5454 status = ice_alloc_recipe(hw, &rid);
5458 buf[recps].recipe_indx = (u8)rid;
5459 buf[recps].content.rid = (u8)rid;
5460 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5461 /* the new entry created should also be part of rg_list to
5462 * make sure we have complete recipe
5464 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5465 sizeof(*last_chain_entry));
5466 if (!last_chain_entry) {
5467 status = ICE_ERR_NO_MEMORY;
5470 last_chain_entry->rid = rid;
5471 ice_memset(&buf[recps].content.lkup_indx, 0,
5472 sizeof(buf[recps].content.lkup_indx),
5474 /* All recipes use look-up index 0 to match switch ID. */
5475 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5476 buf[recps].content.mask[0] =
5477 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5478 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5479 buf[recps].content.lkup_indx[i] =
5480 ICE_AQ_RECIPE_LKUP_IGNORE;
5481 buf[recps].content.mask[i] = 0;
5485 /* update r_bitmap with the recp that is used for chaining */
5486 ice_set_bit(rid, rm->r_bitmap);
5487 /* this is the recipe that chains all the other recipes so it
5488 * should not have a chaining ID to indicate the same
5490 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
5491 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5493 last_chain_entry->fv_idx[i] = entry->chain_idx;
5494 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5495 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5496 ice_set_bit(entry->rid, rm->r_bitmap);
5498 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5499 if (sizeof(buf[recps].recipe_bitmap) >=
5500 sizeof(rm->r_bitmap)) {
5501 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5502 sizeof(buf[recps].recipe_bitmap),
5503 ICE_NONDMA_TO_NONDMA);
5505 status = ICE_ERR_BAD_PTR;
5508 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5510 /* To differentiate among different UDP tunnels, a meta data ID
5514 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5515 buf[recps].content.mask[i] =
5516 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5520 rm->root_rid = (u8)rid;
5522 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5526 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5527 ice_release_change_lock(hw);
5531 /* Every recipe that just got created add it to the recipe
5534 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5535 struct ice_switch_info *sw = hw->switch_info;
5536 bool is_root, idx_found = false;
5537 struct ice_sw_recipe *recp;
5538 u16 idx, buf_idx = 0;
5540 /* find buffer index for copying some data */
5541 for (idx = 0; idx < rm->n_grp_count; idx++)
5542 if (buf[idx].recipe_indx == entry->rid) {
5548 status = ICE_ERR_OUT_OF_RANGE;
5552 recp = &sw->recp_list[entry->rid];
5553 is_root = (rm->root_rid == entry->rid);
5554 recp->is_root = is_root;
5556 recp->root_rid = entry->rid;
5557 recp->big_recp = (is_root && rm->n_grp_count > 1);
5559 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5560 entry->r_group.n_val_pairs *
5561 sizeof(struct ice_fv_word),
5562 ICE_NONDMA_TO_NONDMA);
5564 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5565 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5567 /* Copy non-result fv index values and masks to recipe. This
5568 * call will also update the result recipe bitmask.
5570 ice_collect_result_idx(&buf[buf_idx], recp);
5572 /* for non-root recipes, also copy to the root, this allows
5573 * easier matching of a complete chained recipe
5576 ice_collect_result_idx(&buf[buf_idx],
5577 &sw->recp_list[rm->root_rid]);
5579 recp->n_ext_words = entry->r_group.n_val_pairs;
5580 recp->chain_idx = entry->chain_idx;
5581 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5582 recp->n_grp_count = rm->n_grp_count;
5583 recp->tun_type = rm->tun_type;
5584 recp->recp_created = true;
5599 * ice_create_recipe_group - creates recipe group
5600 * @hw: pointer to hardware structure
5601 * @rm: recipe management list entry
5602 * @lkup_exts: lookup elements
5604 static enum ice_status
5605 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5606 struct ice_prot_lkup_ext *lkup_exts)
5608 enum ice_status status;
5611 rm->n_grp_count = 0;
5613 /* Create recipes for words that are marked not done by packing them
5616 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5617 &rm->rg_list, &recp_count);
5619 rm->n_grp_count += recp_count;
5620 rm->n_ext_words = lkup_exts->n_val_words;
5621 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5622 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5623 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5624 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5631 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5632 * @hw: pointer to hardware structure
5633 * @lkups: lookup elements or match criteria for the advanced recipe, one
5634 * structure per protocol header
5635 * @lkups_cnt: number of protocols
5636 * @bm: bitmap of field vectors to consider
5637 * @fv_list: pointer to a list that holds the returned field vectors
5639 static enum ice_status
5640 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5641 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5643 enum ice_status status;
5647 prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5649 return ICE_ERR_NO_MEMORY;
5651 for (i = 0; i < lkups_cnt; i++)
5652 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5653 status = ICE_ERR_CFG;
5657 /* Find field vectors that include all specified protocol types */
5658 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
5661 ice_free(hw, prot_ids);
5666 * ice_add_special_words - Add words that are not protocols, such as metadata
5667 * @rinfo: other information regarding the rule e.g. priority and action info
5668 * @lkup_exts: lookup word structure
5670 static enum ice_status
5671 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5672 struct ice_prot_lkup_ext *lkup_exts)
5674 /* If this is a tunneled packet, then add recipe index to match the
5675 * tunnel bit in the packet metadata flags.
5677 if (rinfo->tun_type != ICE_NON_TUN) {
5678 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5679 u8 word = lkup_exts->n_val_words++;
5681 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5682 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5684 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
5686 return ICE_ERR_MAX_LIMIT;
5693 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5694 * @hw: pointer to hardware structure
5695 * @rinfo: other information regarding the rule e.g. priority and action info
5696 * @bm: pointer to memory for returning the bitmap of field vectors
5699 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5702 enum ice_prof_type prof_type;
5704 switch (rinfo->tun_type) {
5706 prof_type = ICE_PROF_NON_TUN;
5708 case ICE_ALL_TUNNELS:
5709 prof_type = ICE_PROF_TUN_ALL;
5711 case ICE_SW_TUN_VXLAN_GPE:
5712 case ICE_SW_TUN_GENEVE:
5713 case ICE_SW_TUN_VXLAN:
5714 case ICE_SW_TUN_UDP:
5715 case ICE_SW_TUN_GTP:
5716 prof_type = ICE_PROF_TUN_UDP;
5718 case ICE_SW_TUN_NVGRE:
5719 prof_type = ICE_PROF_TUN_GRE;
5721 case ICE_SW_TUN_PPPOE:
5722 prof_type = ICE_PROF_TUN_PPPOE;
5724 case ICE_SW_TUN_AND_NON_TUN:
5726 prof_type = ICE_PROF_ALL;
5730 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5734 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5735 * @hw: pointer to hardware structure
5736 * @lkups: lookup elements or match criteria for the advanced recipe, one
5737 * structure per protocol header
5738 * @lkups_cnt: number of protocols
5739 * @rinfo: other information regarding the rule e.g. priority and action info
5740 * @rid: return the recipe ID of the recipe created
5742 static enum ice_status
5743 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5744 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5746 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5747 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5748 struct ice_prot_lkup_ext *lkup_exts;
5749 struct ice_recp_grp_entry *r_entry;
5750 struct ice_sw_fv_list_entry *fvit;
5751 struct ice_recp_grp_entry *r_tmp;
5752 struct ice_sw_fv_list_entry *tmp;
5753 enum ice_status status = ICE_SUCCESS;
5754 struct ice_sw_recipe *rm;
5755 bool match_tun = false;
5759 return ICE_ERR_PARAM;
5761 lkup_exts = (struct ice_prot_lkup_ext *)
5762 ice_malloc(hw, sizeof(*lkup_exts));
5764 return ICE_ERR_NO_MEMORY;
5766 /* Determine the number of words to be matched and if it exceeds a
5767 * recipe's restrictions
5769 for (i = 0; i < lkups_cnt; i++) {
5772 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5773 status = ICE_ERR_CFG;
5774 goto err_free_lkup_exts;
5777 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5779 status = ICE_ERR_CFG;
5780 goto err_free_lkup_exts;
5784 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5786 status = ICE_ERR_NO_MEMORY;
5787 goto err_free_lkup_exts;
5790 /* Get field vectors that contain fields extracted from all the protocol
5791 * headers being programmed.
5793 INIT_LIST_HEAD(&rm->fv_list);
5794 INIT_LIST_HEAD(&rm->rg_list);
5796 /* Get bitmap of field vectors (profiles) that are compatible with the
5797 * rule request; only these will be searched in the subsequent call to
5800 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5802 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5806 /* Group match words into recipes using preferred recipe grouping
5809 status = ice_create_recipe_group(hw, rm, lkup_exts);
5813 /* There is only profile for UDP tunnels. So, it is necessary to use a
5814 * metadata ID flag to differentiate different tunnel types. A separate
5815 * recipe needs to be used for the metadata.
5817 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5818 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5819 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5822 /* set the recipe priority if specified */
5823 rm->priority = (u8)rinfo->priority;
5825 /* Find offsets from the field vector. Pick the first one for all the
5828 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5832 /* get bitmap of all profiles the recipe will be associated with */
5833 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5834 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5836 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5837 ice_set_bit((u16)fvit->profile_id, profiles);
5840 /* Create any special protocol/offset pairs, such as looking at tunnel
5841 * bits by extracting metadata
5843 status = ice_add_special_words(rinfo, lkup_exts);
5845 goto err_free_lkup_exts;
5847 /* Look for a recipe which matches our requested fv / mask list */
5848 *rid = ice_find_recp(hw, lkup_exts);
5849 if (*rid < ICE_MAX_NUM_RECIPES)
5850 /* Success if found a recipe that match the existing criteria */
5853 /* Recipe we need does not exist, add a recipe */
5854 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5858 /* Associate all the recipes created with all the profiles in the
5859 * common field vector.
5861 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5863 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5866 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5867 (u8 *)r_bitmap, NULL);
5871 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5872 ICE_MAX_NUM_RECIPES);
5873 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5877 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5880 ice_release_change_lock(hw);
5885 /* Update profile to recipe bitmap array */
5886 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5887 ICE_MAX_NUM_RECIPES);
5889 /* Update recipe to profile bitmap array */
5890 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5891 if (ice_is_bit_set(r_bitmap, j))
5892 ice_set_bit((u16)fvit->profile_id,
5893 recipe_to_profile[j]);
5896 *rid = rm->root_rid;
5897 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5898 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
5900 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5901 ice_recp_grp_entry, l_entry) {
5902 LIST_DEL(&r_entry->l_entry);
5903 ice_free(hw, r_entry);
5906 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5908 LIST_DEL(&fvit->list_entry);
5913 ice_free(hw, rm->root_buf);
5918 ice_free(hw, lkup_exts);
5924 * ice_find_dummy_packet - find dummy packet by tunnel type
5926 * @lkups: lookup elements or match criteria for the advanced recipe, one
5927 * structure per protocol header
5928 * @lkups_cnt: number of protocols
5929 * @tun_type: tunnel type from the match criteria
5930 * @pkt: dummy packet to fill according to filter match criteria
5931 * @pkt_len: packet length of dummy packet
5932 * @offsets: pointer to receive the pointer to the offsets for the packet
5935 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5936 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5938 const struct ice_dummy_pkt_offsets **offsets)
5940 bool tcp = false, udp = false, ipv6 = false, vlan = false;
5944 for (i = 0; i < lkups_cnt; i++) {
5945 if (lkups[i].type == ICE_UDP_ILOS)
5947 else if (lkups[i].type == ICE_TCP_IL)
5949 else if (lkups[i].type == ICE_IPV6_OFOS)
5951 else if (lkups[i].type == ICE_VLAN_OFOS)
5953 else if (lkups[i].type == ICE_IPV4_OFOS &&
5954 lkups[i].h_u.ipv4_hdr.protocol ==
5955 ICE_IPV4_NVGRE_PROTO_ID &&
5956 lkups[i].m_u.ipv4_hdr.protocol ==
5959 else if (lkups[i].type == ICE_PPPOE &&
5960 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5961 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
5962 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
5967 if (tun_type == ICE_SW_TUN_GTP) {
5968 *pkt = dummy_udp_gtp_packet;
5969 *pkt_len = sizeof(dummy_udp_gtp_packet);
5970 *offsets = dummy_udp_gtp_packet_offsets;
5973 if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
5974 *pkt = dummy_pppoe_ipv6_packet;
5975 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
5976 *offsets = dummy_pppoe_packet_offsets;
5978 } else if (tun_type == ICE_SW_TUN_PPPOE) {
5979 *pkt = dummy_pppoe_ipv4_packet;
5980 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
5981 *offsets = dummy_pppoe_packet_offsets;
5985 if (tun_type == ICE_ALL_TUNNELS) {
5986 *pkt = dummy_gre_udp_packet;
5987 *pkt_len = sizeof(dummy_gre_udp_packet);
5988 *offsets = dummy_gre_udp_packet_offsets;
5992 if (tun_type == ICE_SW_TUN_NVGRE || gre) {
5994 *pkt = dummy_gre_tcp_packet;
5995 *pkt_len = sizeof(dummy_gre_tcp_packet);
5996 *offsets = dummy_gre_tcp_packet_offsets;
6000 *pkt = dummy_gre_udp_packet;
6001 *pkt_len = sizeof(dummy_gre_udp_packet);
6002 *offsets = dummy_gre_udp_packet_offsets;
6006 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
6007 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
6009 *pkt = dummy_udp_tun_tcp_packet;
6010 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
6011 *offsets = dummy_udp_tun_tcp_packet_offsets;
6015 *pkt = dummy_udp_tun_udp_packet;
6016 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
6017 *offsets = dummy_udp_tun_udp_packet_offsets;
6023 *pkt = dummy_vlan_udp_packet;
6024 *pkt_len = sizeof(dummy_vlan_udp_packet);
6025 *offsets = dummy_vlan_udp_packet_offsets;
6028 *pkt = dummy_udp_packet;
6029 *pkt_len = sizeof(dummy_udp_packet);
6030 *offsets = dummy_udp_packet_offsets;
6032 } else if (udp && ipv6) {
6034 *pkt = dummy_vlan_udp_ipv6_packet;
6035 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
6036 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
6039 *pkt = dummy_udp_ipv6_packet;
6040 *pkt_len = sizeof(dummy_udp_ipv6_packet);
6041 *offsets = dummy_udp_ipv6_packet_offsets;
6043 } else if ((tcp && ipv6) || ipv6) {
6045 *pkt = dummy_vlan_tcp_ipv6_packet;
6046 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
6047 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
6050 *pkt = dummy_tcp_ipv6_packet;
6051 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
6052 *offsets = dummy_tcp_ipv6_packet_offsets;
6057 *pkt = dummy_vlan_tcp_packet;
6058 *pkt_len = sizeof(dummy_vlan_tcp_packet);
6059 *offsets = dummy_vlan_tcp_packet_offsets;
6061 *pkt = dummy_tcp_packet;
6062 *pkt_len = sizeof(dummy_tcp_packet);
6063 *offsets = dummy_tcp_packet_offsets;
6068 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
6070 * @lkups: lookup elements or match criteria for the advanced recipe, one
6071 * structure per protocol header
6072 * @lkups_cnt: number of protocols
6073 * @s_rule: stores rule information from the match criteria
6074 * @dummy_pkt: dummy packet to fill according to filter match criteria
6075 * @pkt_len: packet length of dummy packet
6076 * @offsets: offset info for the dummy packet
6078 static enum ice_status
6079 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6080 struct ice_aqc_sw_rules_elem *s_rule,
6081 const u8 *dummy_pkt, u16 pkt_len,
6082 const struct ice_dummy_pkt_offsets *offsets)
6087 /* Start with a packet with a pre-defined/dummy content. Then, fill
6088 * in the header values to be looked up or matched.
6090 pkt = s_rule->pdata.lkup_tx_rx.hdr;
6092 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
6094 for (i = 0; i < lkups_cnt; i++) {
6095 enum ice_protocol_type type;
6096 u16 offset = 0, len = 0, j;
6099 /* find the start of this layer; it should be found since this
6100 * was already checked when search for the dummy packet
6102 type = lkups[i].type;
6103 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
6104 if (type == offsets[j].type) {
6105 offset = offsets[j].offset;
6110 /* this should never happen in a correct calling sequence */
6112 return ICE_ERR_PARAM;
6114 switch (lkups[i].type) {
6117 len = sizeof(struct ice_ether_hdr);
6120 len = sizeof(struct ice_ethtype_hdr);
6123 len = sizeof(struct ice_vlan_hdr);
6127 len = sizeof(struct ice_ipv4_hdr);
6131 len = sizeof(struct ice_ipv6_hdr);
6136 len = sizeof(struct ice_l4_hdr);
6139 len = sizeof(struct ice_sctp_hdr);
6142 len = sizeof(struct ice_nvgre);
6147 len = sizeof(struct ice_udp_tnl_hdr);
6151 len = sizeof(struct ice_udp_gtp_hdr);
6154 len = sizeof(struct ice_pppoe_hdr);
6157 return ICE_ERR_PARAM;
6160 /* the length should be a word multiple */
6161 if (len % ICE_BYTES_PER_WORD)
6164 /* We have the offset to the header start, the length, the
6165 * caller's header values and mask. Use this information to
6166 * copy the data into the dummy packet appropriately based on
6167 * the mask. Note that we need to only write the bits as
6168 * indicated by the mask to make sure we don't improperly write
6169 * over any significant packet data.
6171 for (j = 0; j < len / sizeof(u16); j++)
6172 if (((u16 *)&lkups[i].m_u)[j])
6173 ((u16 *)(pkt + offset))[j] =
6174 (((u16 *)(pkt + offset))[j] &
6175 ~((u16 *)&lkups[i].m_u)[j]) |
6176 (((u16 *)&lkups[i].h_u)[j] &
6177 ((u16 *)&lkups[i].m_u)[j]);
6180 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6186 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6187 * @hw: pointer to the hardware structure
6188 * @tun_type: tunnel type
6189 * @pkt: dummy packet to fill in
6190 * @offsets: offset info for the dummy packet
6192 static enum ice_status
6193 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6194 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6199 case ICE_SW_TUN_AND_NON_TUN:
6200 case ICE_SW_TUN_VXLAN_GPE:
6201 case ICE_SW_TUN_VXLAN:
6202 case ICE_SW_TUN_UDP:
6203 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6207 case ICE_SW_TUN_GENEVE:
6208 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6213 /* Nothing needs to be done for this tunnel type */
6217 /* Find the outer UDP protocol header and insert the port number */
6218 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6219 if (offsets[i].type == ICE_UDP_OF) {
6220 struct ice_l4_hdr *hdr;
6223 offset = offsets[i].offset;
6224 hdr = (struct ice_l4_hdr *)&pkt[offset];
6225 hdr->dst_port = CPU_TO_BE16(open_port);
6235 * ice_find_adv_rule_entry - Search a rule entry
6236 * @hw: pointer to the hardware structure
6237 * @lkups: lookup elements or match criteria for the advanced recipe, one
6238 * structure per protocol header
6239 * @lkups_cnt: number of protocols
6240 * @recp_id: recipe ID for which we are finding the rule
6241 * @rinfo: other information regarding the rule e.g. priority and action info
6243 * Helper function to search for a given advance rule entry
6244 * Returns pointer to entry storing the rule if found
6246 static struct ice_adv_fltr_mgmt_list_entry *
6247 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6248 u16 lkups_cnt, u16 recp_id,
6249 struct ice_adv_rule_info *rinfo)
6251 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6252 struct ice_switch_info *sw = hw->switch_info;
6255 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6256 ice_adv_fltr_mgmt_list_entry, list_entry) {
6257 bool lkups_matched = true;
6259 if (lkups_cnt != list_itr->lkups_cnt)
6261 for (i = 0; i < list_itr->lkups_cnt; i++)
6262 if (memcmp(&list_itr->lkups[i], &lkups[i],
6264 lkups_matched = false;
6267 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6268 rinfo->tun_type == list_itr->rule_info.tun_type &&
6276 * ice_adv_add_update_vsi_list
6277 * @hw: pointer to the hardware structure
6278 * @m_entry: pointer to current adv filter management list entry
6279 * @cur_fltr: filter information from the book keeping entry
6280 * @new_fltr: filter information with the new VSI to be added
6282 * Call AQ command to add or update previously created VSI list with new VSI.
6284 * Helper function to do book keeping associated with adding filter information
6285 * The algorithm to do the booking keeping is described below :
6286 * When a VSI needs to subscribe to a given advanced filter
6287 * if only one VSI has been added till now
6288 * Allocate a new VSI list and add two VSIs
6289 * to this list using switch rule command
6290 * Update the previously created switch rule with the
6291 * newly created VSI list ID
6292 * if a VSI list was previously created
6293 * Add the new VSI to the previously created VSI list set
6294 * using the update switch rule command
6296 static enum ice_status
6297 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6298 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6299 struct ice_adv_rule_info *cur_fltr,
6300 struct ice_adv_rule_info *new_fltr)
6302 enum ice_status status;
6303 u16 vsi_list_id = 0;
6305 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6306 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6307 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6308 return ICE_ERR_NOT_IMPL;
6310 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6311 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6312 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6313 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6314 return ICE_ERR_NOT_IMPL;
6316 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6317 /* Only one entry existed in the mapping and it was not already
6318 * a part of a VSI list. So, create a VSI list with the old and
6321 struct ice_fltr_info tmp_fltr;
6322 u16 vsi_handle_arr[2];
6324 /* A rule already exists with the new VSI being added */
6325 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6326 new_fltr->sw_act.fwd_id.hw_vsi_id)
6327 return ICE_ERR_ALREADY_EXISTS;
6329 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6330 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6331 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6337 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6338 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6339 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6340 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6341 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
6343 /* Update the previous switch rule of "forward to VSI" to
6346 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6350 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6351 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6352 m_entry->vsi_list_info =
6353 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6356 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6358 if (!m_entry->vsi_list_info)
6361 /* A rule already exists with the new VSI being added */
6362 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6365 /* Update the previously created VSI list set with
6366 * the new VSI ID passed in
6368 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6370 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6372 ice_aqc_opc_update_sw_rules,
6374 /* update VSI list mapping info with new VSI ID */
6376 ice_set_bit(vsi_handle,
6377 m_entry->vsi_list_info->vsi_map);
6380 m_entry->vsi_count++;
6385 * ice_add_adv_rule - helper function to create an advanced switch rule
6386 * @hw: pointer to the hardware structure
6387 * @lkups: information on the words that needs to be looked up. All words
6388 * together makes one recipe
6389 * @lkups_cnt: num of entries in the lkups array
6390 * @rinfo: other information related to the rule that needs to be programmed
6391 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6392 * ignored is case of error.
6394 * This function can program only 1 rule at a time. The lkups is used to
6395 * describe the all the words that forms the "lookup" portion of the recipe.
6396 * These words can span multiple protocols. Callers to this function need to
6397 * pass in a list of protocol headers with lookup information along and mask
6398 * that determines which words are valid from the given protocol header.
6399 * rinfo describes other information related to this rule such as forwarding
6400 * IDs, priority of this rule, etc.
6403 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6404 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6405 struct ice_rule_query_data *added_entry)
6407 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6408 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6409 const struct ice_dummy_pkt_offsets *pkt_offsets;
6410 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6411 struct LIST_HEAD_TYPE *rule_head;
6412 struct ice_switch_info *sw;
6413 enum ice_status status;
6414 const u8 *pkt = NULL;
6419 /* Initialize profile to result index bitmap */
6420 if (!hw->switch_info->prof_res_bm_init) {
6421 hw->switch_info->prof_res_bm_init = 1;
6422 ice_init_prof_result_bm(hw);
6426 return ICE_ERR_PARAM;
6428 /* get # of words we need to match */
6430 for (i = 0; i < lkups_cnt; i++) {
6433 ptr = (u16 *)&lkups[i].m_u;
6434 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6438 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6439 return ICE_ERR_PARAM;
6441 /* make sure that we can locate a dummy packet */
6442 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6445 status = ICE_ERR_PARAM;
6446 goto err_ice_add_adv_rule;
6449 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6450 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6451 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6452 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6455 vsi_handle = rinfo->sw_act.vsi_handle;
6456 if (!ice_is_vsi_valid(hw, vsi_handle))
6457 return ICE_ERR_PARAM;
6459 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6460 rinfo->sw_act.fwd_id.hw_vsi_id =
6461 ice_get_hw_vsi_num(hw, vsi_handle);
6462 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6463 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6465 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6468 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6470 /* we have to add VSI to VSI_LIST and increment vsi_count.
6471 * Also Update VSI list so that we can change forwarding rule
6472 * if the rule already exists, we will check if it exists with
6473 * same vsi_id, if not then add it to the VSI list if it already
6474 * exists if not then create a VSI list and add the existing VSI
6475 * ID and the new VSI ID to the list
6476 * We will add that VSI to the list
6478 status = ice_adv_add_update_vsi_list(hw, m_entry,
6479 &m_entry->rule_info,
6482 added_entry->rid = rid;
6483 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6484 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6488 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6489 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6491 return ICE_ERR_NO_MEMORY;
6492 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6493 switch (rinfo->sw_act.fltr_act) {
6494 case ICE_FWD_TO_VSI:
6495 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6496 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6497 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6500 act |= ICE_SINGLE_ACT_TO_Q;
6501 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6502 ICE_SINGLE_ACT_Q_INDEX_M;
6504 case ICE_FWD_TO_QGRP:
6505 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6506 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6507 act |= ICE_SINGLE_ACT_TO_Q;
6508 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6509 ICE_SINGLE_ACT_Q_INDEX_M;
6510 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6511 ICE_SINGLE_ACT_Q_REGION_M;
6513 case ICE_DROP_PACKET:
6514 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6515 ICE_SINGLE_ACT_VALID_BIT;
6518 status = ICE_ERR_CFG;
6519 goto err_ice_add_adv_rule;
6522 /* set the rule LOOKUP type based on caller specified 'RX'
6523 * instead of hardcoding it to be either LOOKUP_TX/RX
6525 * for 'RX' set the source to be the port number
6526 * for 'TX' set the source to be the source HW VSI number (determined
6530 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6531 s_rule->pdata.lkup_tx_rx.src =
6532 CPU_TO_LE16(hw->port_info->lport);
6534 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6535 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6538 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6539 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6541 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6542 pkt_len, pkt_offsets);
6544 goto err_ice_add_adv_rule;
6546 if (rinfo->tun_type != ICE_NON_TUN &&
6547 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6548 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6549 s_rule->pdata.lkup_tx_rx.hdr,
6552 goto err_ice_add_adv_rule;
6555 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6556 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6559 goto err_ice_add_adv_rule;
6560 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6561 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6563 status = ICE_ERR_NO_MEMORY;
6564 goto err_ice_add_adv_rule;
6567 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6568 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6569 ICE_NONDMA_TO_NONDMA);
6570 if (!adv_fltr->lkups) {
6571 status = ICE_ERR_NO_MEMORY;
6572 goto err_ice_add_adv_rule;
6575 adv_fltr->lkups_cnt = lkups_cnt;
6576 adv_fltr->rule_info = *rinfo;
6577 adv_fltr->rule_info.fltr_rule_id =
6578 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6579 sw = hw->switch_info;
6580 sw->recp_list[rid].adv_rule = true;
6581 rule_head = &sw->recp_list[rid].filt_rules;
6583 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6584 struct ice_fltr_info tmp_fltr;
6586 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6587 tmp_fltr.fltr_rule_id =
6588 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6589 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6590 tmp_fltr.fwd_id.hw_vsi_id =
6591 ice_get_hw_vsi_num(hw, vsi_handle);
6592 tmp_fltr.vsi_handle = vsi_handle;
6593 /* Update the previous switch rule of "forward to VSI" to
6596 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6598 goto err_ice_add_adv_rule;
6599 adv_fltr->vsi_count = 1;
6602 /* Add rule entry to book keeping list */
6603 LIST_ADD(&adv_fltr->list_entry, rule_head);
6605 added_entry->rid = rid;
6606 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6607 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6609 err_ice_add_adv_rule:
6610 if (status && adv_fltr) {
6611 ice_free(hw, adv_fltr->lkups);
6612 ice_free(hw, adv_fltr);
6615 ice_free(hw, s_rule);
6621 * ice_adv_rem_update_vsi_list
6622 * @hw: pointer to the hardware structure
6623 * @vsi_handle: VSI handle of the VSI to remove
6624 * @fm_list: filter management entry for which the VSI list management needs to
6627 static enum ice_status
6628 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6629 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6631 struct ice_vsi_list_map_info *vsi_list_info;
6632 enum ice_sw_lkup_type lkup_type;
6633 enum ice_status status;
6636 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6637 fm_list->vsi_count == 0)
6638 return ICE_ERR_PARAM;
6640 /* A rule with the VSI being removed does not exist */
6641 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6642 return ICE_ERR_DOES_NOT_EXIST;
6644 lkup_type = ICE_SW_LKUP_LAST;
6645 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6646 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6647 ice_aqc_opc_update_sw_rules,
6652 fm_list->vsi_count--;
6653 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6654 vsi_list_info = fm_list->vsi_list_info;
6655 if (fm_list->vsi_count == 1) {
6656 struct ice_fltr_info tmp_fltr;
6659 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6661 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6662 return ICE_ERR_OUT_OF_RANGE;
6664 /* Make sure VSI list is empty before removing it below */
6665 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6667 ice_aqc_opc_update_sw_rules,
6672 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
6673 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6674 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6675 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6676 tmp_fltr.fwd_id.hw_vsi_id =
6677 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6678 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6679 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6681 /* Update the previous switch rule of "MAC forward to VSI" to
6682 * "MAC fwd to VSI list"
6684 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6686 ice_debug(hw, ICE_DBG_SW,
6687 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6688 tmp_fltr.fwd_id.hw_vsi_id, status);
6692 /* Remove the VSI list since it is no longer used */
6693 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6695 ice_debug(hw, ICE_DBG_SW,
6696 "Failed to remove VSI list %d, error %d\n",
6697 vsi_list_id, status);
6701 LIST_DEL(&vsi_list_info->list_entry);
6702 ice_free(hw, vsi_list_info);
6703 fm_list->vsi_list_info = NULL;
6710 * ice_rem_adv_rule - removes existing advanced switch rule
6711 * @hw: pointer to the hardware structure
6712 * @lkups: information on the words that needs to be looked up. All words
6713 * together makes one recipe
6714 * @lkups_cnt: num of entries in the lkups array
6715 * @rinfo: Its the pointer to the rule information for the rule
6717 * This function can be used to remove 1 rule at a time. The lkups is
6718 * used to describe all the words that forms the "lookup" portion of the
6719 * rule. These words can span multiple protocols. Callers to this function
6720 * need to pass in a list of protocol headers with lookup information along
6721 * and mask that determines which words are valid from the given protocol
6722 * header. rinfo describes other information related to this rule such as
6723 * forwarding IDs, priority of this rule, etc.
6726 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6727 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6729 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6730 struct ice_prot_lkup_ext lkup_exts;
6731 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6732 enum ice_status status = ICE_SUCCESS;
6733 bool remove_rule = false;
6734 u16 i, rid, vsi_handle;
6736 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6737 for (i = 0; i < lkups_cnt; i++) {
6740 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6743 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6748 /* Create any special protocol/offset pairs, such as looking at tunnel
6749 * bits by extracting metadata
6751 status = ice_add_special_words(rinfo, &lkup_exts);
6755 rid = ice_find_recp(hw, &lkup_exts);
6756 /* If did not find a recipe that match the existing criteria */
6757 if (rid == ICE_MAX_NUM_RECIPES)
6758 return ICE_ERR_PARAM;
6760 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6761 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6762 /* the rule is already removed */
6765 ice_acquire_lock(rule_lock);
6766 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6768 } else if (list_elem->vsi_count > 1) {
6769 list_elem->vsi_list_info->ref_cnt--;
6770 remove_rule = false;
6771 vsi_handle = rinfo->sw_act.vsi_handle;
6772 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6774 vsi_handle = rinfo->sw_act.vsi_handle;
6775 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6777 ice_release_lock(rule_lock);
6780 if (list_elem->vsi_count == 0)
6783 ice_release_lock(rule_lock);
6785 struct ice_aqc_sw_rules_elem *s_rule;
6788 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6790 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6793 return ICE_ERR_NO_MEMORY;
6794 s_rule->pdata.lkup_tx_rx.act = 0;
6795 s_rule->pdata.lkup_tx_rx.index =
6796 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6797 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6798 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6800 ice_aqc_opc_remove_sw_rules, NULL);
6801 if (status == ICE_SUCCESS) {
6802 ice_acquire_lock(rule_lock);
6803 LIST_DEL(&list_elem->list_entry);
6804 ice_free(hw, list_elem->lkups);
6805 ice_free(hw, list_elem);
6806 ice_release_lock(rule_lock);
6808 ice_free(hw, s_rule);
6814 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6815 * @hw: pointer to the hardware structure
6816 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6818 * This function is used to remove 1 rule at a time. The removal is based on
6819 * the remove_entry parameter. This function will remove rule for a given
6820 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6823 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6824 struct ice_rule_query_data *remove_entry)
6826 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6827 struct LIST_HEAD_TYPE *list_head;
6828 struct ice_adv_rule_info rinfo;
6829 struct ice_switch_info *sw;
6831 sw = hw->switch_info;
6832 if (!sw->recp_list[remove_entry->rid].recp_created)
6833 return ICE_ERR_PARAM;
6834 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6835 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6837 if (list_itr->rule_info.fltr_rule_id ==
6838 remove_entry->rule_id) {
6839 rinfo = list_itr->rule_info;
6840 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6841 return ice_rem_adv_rule(hw, list_itr->lkups,
6842 list_itr->lkups_cnt, &rinfo);
6845 return ICE_ERR_PARAM;
6849 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6851 * @hw: pointer to the hardware structure
6852 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6854 * This function is used to remove all the rules for a given VSI and as soon
6855 * as removing a rule fails, it will return immediately with the error code,
6856 * else it will return ICE_SUCCESS
6859 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6861 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6862 struct ice_vsi_list_map_info *map_info;
6863 struct LIST_HEAD_TYPE *list_head;
6864 struct ice_adv_rule_info rinfo;
6865 struct ice_switch_info *sw;
6866 enum ice_status status;
6867 u16 vsi_list_id = 0;
6870 sw = hw->switch_info;
6871 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6872 if (!sw->recp_list[rid].recp_created)
6874 if (!sw->recp_list[rid].adv_rule)
6876 list_head = &sw->recp_list[rid].filt_rules;
6878 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6879 ice_adv_fltr_mgmt_list_entry, list_entry) {
6880 map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
6885 rinfo = list_itr->rule_info;
6886 rinfo.sw_act.vsi_handle = vsi_handle;
6887 status = ice_rem_adv_rule(hw, list_itr->lkups,
6888 list_itr->lkups_cnt, &rinfo);
6898 * ice_replay_fltr - Replay all the filters stored by a specific list head
6899 * @hw: pointer to the hardware structure
6900 * @list_head: list for which filters needs to be replayed
6901 * @recp_id: Recipe ID for which rules need to be replayed
6903 static enum ice_status
6904 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6906 struct ice_fltr_mgmt_list_entry *itr;
6907 enum ice_status status = ICE_SUCCESS;
6908 struct ice_sw_recipe *recp_list;
6909 u8 lport = hw->port_info->lport;
6910 struct LIST_HEAD_TYPE l_head;
6912 if (LIST_EMPTY(list_head))
6915 recp_list = &hw->switch_info->recp_list[recp_id];
6916 /* Move entries from the given list_head to a temporary l_head so that
6917 * they can be replayed. Otherwise when trying to re-add the same
6918 * filter, the function will return already exists
6920 LIST_REPLACE_INIT(list_head, &l_head);
6922 /* Mark the given list_head empty by reinitializing it so filters
6923 * could be added again by *handler
6925 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6927 struct ice_fltr_list_entry f_entry;
6929 f_entry.fltr_info = itr->fltr_info;
6930 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6931 status = ice_add_rule_internal(hw, recp_list, lport,
6933 if (status != ICE_SUCCESS)
6938 /* Add a filter per VSI separately */
6943 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6945 if (!ice_is_vsi_valid(hw, vsi_handle))
6948 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6949 f_entry.fltr_info.vsi_handle = vsi_handle;
6950 f_entry.fltr_info.fwd_id.hw_vsi_id =
6951 ice_get_hw_vsi_num(hw, vsi_handle);
6952 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6953 if (recp_id == ICE_SW_LKUP_VLAN)
6954 status = ice_add_vlan_internal(hw, recp_list,
6957 status = ice_add_rule_internal(hw, recp_list,
6960 if (status != ICE_SUCCESS)
6965 /* Clear the filter management list */
6966 ice_rem_sw_rule_info(hw, &l_head);
6971 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6972 * @hw: pointer to the hardware structure
6974 * NOTE: This function does not clean up partially added filters on error.
6975 * It is up to caller of the function to issue a reset or fail early.
6977 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6979 struct ice_switch_info *sw = hw->switch_info;
6980 enum ice_status status = ICE_SUCCESS;
6983 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6984 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6986 status = ice_replay_fltr(hw, i, head);
6987 if (status != ICE_SUCCESS)
6994 * ice_replay_vsi_fltr - Replay filters for requested VSI
6995 * @hw: pointer to the hardware structure
6996 * @vsi_handle: driver VSI handle
6997 * @recp_id: Recipe ID for which rules need to be replayed
6998 * @list_head: list for which filters need to be replayed
7000 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
7001 * It is required to pass valid VSI handle.
7003 static enum ice_status
7004 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
7005 struct LIST_HEAD_TYPE *list_head)
7007 struct ice_fltr_mgmt_list_entry *itr;
7008 enum ice_status status = ICE_SUCCESS;
7009 struct ice_sw_recipe *recp_list;
7012 if (LIST_EMPTY(list_head))
7014 recp_list = &hw->switch_info->recp_list[recp_id];
7015 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
7017 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
7019 struct ice_fltr_list_entry f_entry;
7021 f_entry.fltr_info = itr->fltr_info;
7022 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
7023 itr->fltr_info.vsi_handle == vsi_handle) {
7024 /* update the src in case it is VSI num */
7025 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7026 f_entry.fltr_info.src = hw_vsi_id;
7027 status = ice_add_rule_internal(hw, recp_list,
7028 hw->port_info->lport,
7030 if (status != ICE_SUCCESS)
7034 if (!itr->vsi_list_info ||
7035 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
7037 /* Clearing it so that the logic can add it back */
7038 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
7039 f_entry.fltr_info.vsi_handle = vsi_handle;
7040 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
7041 /* update the src in case it is VSI num */
7042 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
7043 f_entry.fltr_info.src = hw_vsi_id;
7044 if (recp_id == ICE_SW_LKUP_VLAN)
7045 status = ice_add_vlan_internal(hw, recp_list, &f_entry);
7047 status = ice_add_rule_internal(hw, recp_list,
7048 hw->port_info->lport,
7050 if (status != ICE_SUCCESS)
7058 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
7059 * @hw: pointer to the hardware structure
7060 * @vsi_handle: driver VSI handle
7061 * @list_head: list for which filters need to be replayed
7063 * Replay the advanced rule for the given VSI.
7065 static enum ice_status
7066 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
7067 struct LIST_HEAD_TYPE *list_head)
7069 struct ice_rule_query_data added_entry = { 0 };
7070 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
7071 enum ice_status status = ICE_SUCCESS;
7073 if (LIST_EMPTY(list_head))
7075 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
7077 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
7078 u16 lk_cnt = adv_fltr->lkups_cnt;
7080 if (vsi_handle != rinfo->sw_act.vsi_handle)
7082 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
7091 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
7092 * @hw: pointer to the hardware structure
7093 * @vsi_handle: driver VSI handle
7095 * Replays filters for requested VSI via vsi_handle.
7097 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
7099 struct ice_switch_info *sw = hw->switch_info;
7100 enum ice_status status;
7103 /* Update the recipes that were created */
7104 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7105 struct LIST_HEAD_TYPE *head;
7107 head = &sw->recp_list[i].filt_replay_rules;
7108 if (!sw->recp_list[i].adv_rule)
7109 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
7111 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
7112 if (status != ICE_SUCCESS)
7120 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
7121 * @hw: pointer to the HW struct
7123 * Deletes the filter replay rules.
7125 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
7127 struct ice_switch_info *sw = hw->switch_info;
7133 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
7134 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
7135 struct LIST_HEAD_TYPE *l_head;
7137 l_head = &sw->recp_list[i].filt_replay_rules;
7138 if (!sw->recp_list[i].adv_rule)
7139 ice_rem_sw_rule_info(hw, l_head);
7141 ice_rem_adv_rule_info(hw, l_head);