1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields within the dummy ethernet header defined below */
#define ICE_ETH_DA_OFFSET 0 /* destination MAC address */
#define ICE_ETH_ETHTYPE_OFFSET 12 /* EtherType (header without VLAN tag) */
#define ICE_ETH_VLAN_TCI_OFFSET 14 /* VLAN TCI (header with VLAN tag) */
#define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* DA(6) + SA(6) + EtherType(2)/VLAN tag(4) = 16 bytes; see the "Word on
 * Hardcoded values" block above for the meaning of each byte
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size (bytes) of a RX/TX switch rule AQ element whose payload is a lookup
 * rule carrying the dummy ethernet header as packet data.
 * NOTE(review): the trailing "- 1" appears to compensate for a one-byte
 * placeholder data member inside ice_sw_rule_lkup_rx_tx -- confirm against
 * the struct definition in the adminq command header.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same as above but with no packet data appended to the rule */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element carrying n actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element carrying n VSI numbers */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps one protocol header to its byte offset within a dummy packet */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type; /* protocol header present in the packet */
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
61 struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
69 { ICE_PROTOCOL_LAST, 0 },
73 u8 dummy_gre_tcp_packet[] = {
74 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
75 0x00, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00,
78 0x08, 0x00, /* ICE_ETYPE_OL 12 */
80 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x2F, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00,
86 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
87 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
90 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00,
94 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x06, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
101 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00,
103 0x50, 0x02, 0x20, 0x00,
104 0x00, 0x00, 0x00, 0x00
108 struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
110 { ICE_ETYPE_OL, 12 },
111 { ICE_IPV4_OFOS, 14 },
115 { ICE_UDP_ILOS, 76 },
116 { ICE_PROTOCOL_LAST, 0 },
120 u8 dummy_gre_udp_packet[] = {
121 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
122 0x00, 0x00, 0x00, 0x00,
123 0x00, 0x00, 0x00, 0x00,
125 0x08, 0x00, /* ICE_ETYPE_OL 12 */
127 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
128 0x00, 0x00, 0x00, 0x00,
129 0x00, 0x2F, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
133 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
141 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
142 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x11, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00,
145 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
148 0x00, 0x08, 0x00, 0x00,
152 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
154 { ICE_ETYPE_OL, 12 },
155 { ICE_IPV4_OFOS, 14 },
159 { ICE_VXLAN_GPE, 42 },
163 { ICE_PROTOCOL_LAST, 0 },
167 u8 dummy_udp_tun_tcp_packet[] = {
168 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
169 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00,
172 0x08, 0x00, /* ICE_ETYPE_OL 12 */
174 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
175 0x00, 0x01, 0x00, 0x00,
176 0x40, 0x11, 0x00, 0x00,
177 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
181 0x00, 0x46, 0x00, 0x00,
183 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
184 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
187 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
191 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
192 0x00, 0x01, 0x00, 0x00,
193 0x40, 0x06, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0x00, 0x00,
197 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
198 0x00, 0x00, 0x00, 0x00,
199 0x00, 0x00, 0x00, 0x00,
200 0x50, 0x02, 0x20, 0x00,
201 0x00, 0x00, 0x00, 0x00
205 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
207 { ICE_ETYPE_OL, 12 },
208 { ICE_IPV4_OFOS, 14 },
212 { ICE_VXLAN_GPE, 42 },
215 { ICE_UDP_ILOS, 84 },
216 { ICE_PROTOCOL_LAST, 0 },
220 u8 dummy_udp_tun_udp_packet[] = {
221 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x08, 0x00, /* ICE_ETYPE_OL 12 */
227 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
228 0x00, 0x01, 0x00, 0x00,
229 0x00, 0x11, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
234 0x00, 0x3a, 0x00, 0x00,
236 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
237 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
240 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
244 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
245 0x00, 0x01, 0x00, 0x00,
246 0x00, 0x11, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
251 0x00, 0x08, 0x00, 0x00,
254 /* offset info for MAC + IPv4 + UDP dummy packet */
255 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
257 { ICE_ETYPE_OL, 12 },
258 { ICE_IPV4_OFOS, 14 },
259 { ICE_UDP_ILOS, 34 },
260 { ICE_PROTOCOL_LAST, 0 },
263 /* Dummy packet for MAC + IPv4 + UDP */
264 static const u8 dummy_udp_packet[] = {
265 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x08, 0x00, /* ICE_ETYPE_OL 12 */
271 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
272 0x00, 0x01, 0x00, 0x00,
273 0x00, 0x11, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00,
277 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
278 0x00, 0x08, 0x00, 0x00,
280 0x00, 0x00, /* 2 bytes for 4 byte alignment */
283 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
284 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
286 { ICE_ETYPE_OL, 12 },
287 { ICE_VLAN_OFOS, 14 },
288 { ICE_IPV4_OFOS, 18 },
289 { ICE_UDP_ILOS, 38 },
290 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:UDP dummy packet */
294 static const u8 dummy_vlan_udp_packet[] = {
295 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
296 0x00, 0x00, 0x00, 0x00,
297 0x00, 0x00, 0x00, 0x00,
299 0x81, 0x00, /* ICE_ETYPE_OL 12 */
301 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
303 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
304 0x00, 0x01, 0x00, 0x00,
305 0x00, 0x11, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
310 0x00, 0x08, 0x00, 0x00,
312 0x00, 0x00, /* 2 bytes for 4 byte alignment */
315 /* offset info for MAC + IPv4 + TCP dummy packet */
316 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
318 { ICE_ETYPE_OL, 12 },
319 { ICE_IPV4_OFOS, 14 },
321 { ICE_PROTOCOL_LAST, 0 },
324 /* Dummy packet for MAC + IPv4 + TCP */
325 static const u8 dummy_tcp_packet[] = {
326 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x08, 0x00, /* ICE_ETYPE_OL 12 */
332 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
333 0x00, 0x01, 0x00, 0x00,
334 0x00, 0x06, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
339 0x00, 0x00, 0x00, 0x00,
340 0x00, 0x00, 0x00, 0x00,
341 0x50, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, /* 2 bytes for 4 byte alignment */
347 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
348 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
350 { ICE_ETYPE_OL, 12 },
351 { ICE_VLAN_OFOS, 14 },
352 { ICE_IPV4_OFOS, 18 },
354 { ICE_PROTOCOL_LAST, 0 },
/* C-tag (802.1Q), IPv4:TCP dummy packet */
358 static const u8 dummy_vlan_tcp_packet[] = {
359 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
360 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00,
363 0x81, 0x00, /* ICE_ETYPE_OL 12 */
365 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
367 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
368 0x00, 0x01, 0x00, 0x00,
369 0x00, 0x06, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00,
373 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
374 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00,
376 0x50, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, /* 2 bytes for 4 byte alignment */
382 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
384 { ICE_ETYPE_OL, 12 },
385 { ICE_IPV6_OFOS, 14 },
387 { ICE_PROTOCOL_LAST, 0 },
dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
418 /* C-tag (802.1Q): IPv6 + TCP */
419 static const struct ice_dummy_pkt_offsets
420 dummy_vlan_tcp_ipv6_packet_offsets[] = {
422 { ICE_ETYPE_OL, 12 },
423 { ICE_VLAN_OFOS, 14 },
424 { ICE_IPV6_OFOS, 18 },
426 { ICE_PROTOCOL_LAST, 0 },
429 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
430 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
431 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
435 0x81, 0x00, /* ICE_ETYPE_OL 12 */
437 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
439 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
440 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x00, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
446 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, 0x00, 0x00,
448 0x00, 0x00, 0x00, 0x00,
450 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
451 0x00, 0x00, 0x00, 0x00,
452 0x00, 0x00, 0x00, 0x00,
453 0x50, 0x00, 0x00, 0x00,
454 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, /* 2 bytes for 4 byte alignment */
460 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
462 { ICE_ETYPE_OL, 12 },
463 { ICE_IPV6_OFOS, 14 },
464 { ICE_UDP_ILOS, 54 },
465 { ICE_PROTOCOL_LAST, 0 },
dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
493 /* C-tag (802.1Q): IPv6 + UDP */
494 static const struct ice_dummy_pkt_offsets
495 dummy_vlan_udp_ipv6_packet_offsets[] = {
497 { ICE_ETYPE_OL, 12 },
498 { ICE_VLAN_OFOS, 14 },
499 { ICE_IPV6_OFOS, 18 },
500 { ICE_UDP_ILOS, 58 },
501 { ICE_PROTOCOL_LAST, 0 },
504 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
505 static const u8 dummy_vlan_udp_ipv6_packet[] = {
506 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
510 0x81, 0x00, /* ICE_ETYPE_OL 12 */
512 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
514 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
515 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
516 0x00, 0x00, 0x00, 0x00,
517 0x00, 0x00, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00,
521 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
526 0x00, 0x08, 0x00, 0x00,
528 0x00, 0x00, /* 2 bytes for 4 byte alignment */
531 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
533 { ICE_IPV4_OFOS, 14 },
536 { ICE_PROTOCOL_LAST, 0 },
540 dummy_udp_gtp_packet[] = {
541 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
542 0x00, 0x00, 0x00, 0x00,
543 0x00, 0x00, 0x00, 0x00,
546 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
547 0x00, 0x00, 0x00, 0x00,
548 0x00, 0x11, 0x00, 0x00,
549 0x00, 0x00, 0x00, 0x00,
550 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
553 0x00, 0x1c, 0x00, 0x00,
555 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
556 0x00, 0x00, 0x00, 0x00,
557 0x00, 0x00, 0x00, 0x85,
559 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
560 0x00, 0x00, 0x00, 0x00,
564 struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
566 { ICE_ETYPE_OL, 12 },
567 { ICE_VLAN_OFOS, 14},
569 { ICE_PROTOCOL_LAST, 0 },
573 dummy_pppoe_packet[] = {
574 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
575 0x00, 0x00, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00,
578 0x81, 0x00, /* ICE_ETYPE_OL 12 */
580 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
582 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
585 0x00, 0x21, /* PPP Link Layer 24 */
587 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
588 0x00, 0x00, 0x00, 0x00,
589 0x00, 0x00, 0x00, 0x00,
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
593 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* recipe to profile association bitmap: indexed by recipe ID, each entry is
 * the set of profiles that recipe is mapped to
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* profile to recipe association bitmap: indexed by profile ID, each entry is
 * the set of recipes mapped to that profile
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; defined later in this file, used by
 * ice_get_recp_frm_fw() to refresh the cached maps above
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
607 * ice_collect_result_idx - copy result index values
608 * @buf: buffer that contains the result index
609 * @recp: the recipe struct to copy data into
611 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
612 struct ice_sw_recipe *recp)
614 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
615 ice_set_bit(buf->content.result_indx &
616 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
620 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
621 * @hw: pointer to hardware structure
622 * @recps: struct that we need to populate
623 * @rid: recipe ID that we are populating
624 * @refresh_required: true if we should get recipe to profile mapping from FW
626 * This function is used to populate all the necessary entries into our
627 * bookkeeping so that we have a current list of all the recipes that are
628 * programmed in the firmware.
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	/* accumulates the result indexes used by every sub-recipe in chain */
	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u16 i, sub_recps, fv_word_idx = 0;
	enum ice_status status;

	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES, sizeof(*tmp));
	/* allocation-failure path (NOTE(review): the "if (!tmp)" guard is not
	 * visible in this excerpt -- confirm)
	 */
		return ICE_ERR_NO_MEMORY;

	/* ask FW for the recipe chain rooted at rid; num_recps is updated to
	 * the number of entries actually returned
	 */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 prof, idx, prot = 0;

		/* per-sub-recipe bookkeeping entry, linked onto rg_list */
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			ice_set_bit(root_bufs.content.result_indx &
				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);

		/* get the first profile that is associated with rid */
		prof = ice_find_first_bit(recipe_to_profile[idx],
					  ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* entry 0 of lkup_indx/mask is skipped here */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
					   rg_entry->fv_idx[i]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			/* translate field-vector index -> protocol id/offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
				~ICE_AQ_RECIPE_RESULT_EN;
			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			/* entry has no valid chained result index */
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
		recps[idx].root_rid = root_bufs.content.rid &
			~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = num_recps;
	/* keep a private copy of the raw FW buffer for later re-programming */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_memdup(hw, tmp, recps[rid].n_grp_count *
			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	if (!recps[rid].root_buf)

	/* Copy result indexes */
	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
775 * ice_get_recp_to_prof_map - updates recipe to profile mapping
776 * @hw: pointer to hardware structure
778 * This function is used to populate recipe_to_profile matrix where index to
779 * this array is the recipe ID and the element is the mapping of which profiles
780 * is this recipe mapped to.
ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

	/* query FW once per profile and cache both directions of the map */
	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* skip profiles whose mapping cannot be read from FW */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		/* also record the reverse (recipe -> profile) association */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
804 * ice_init_def_sw_recp - initialize the recipe book keeping tables
805 * @hw: pointer to the HW struct
807 * Allocate memory for the entire recipe table and initialize the structures/
808 * entries corresponding to basic recipes.
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
	struct ice_sw_recipe *recps;

	/* one zero-initialized entry per possible recipe */
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
	/* allocation-failure path */
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* basic recipes are their own root */
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		/* protects the per-recipe filter rule list */
		ice_init_lock(&recps[i].filt_rule_lock);

	hw->switch_info->recp_list = recps;
834 * ice_aq_get_sw_cfg - get switch configuration
835 * @hw: pointer to the hardware structure
836 * @buf: pointer to the result buffer
837 * @buf_size: length of the buffer available for response
838 * @req_desc: pointer to requested descriptor
839 * @num_elems: pointer to number of elements
840 * @cd: pointer to command details structure or NULL
842 * Get switch configuration (0x0200) to be placed in 'buff'.
843 * This admin command returns information such as initial VSI/port number
844 * and switch ID it belongs to.
846 * NOTE: *req_desc is both an input/output parameter.
847 * The caller of this function first calls this function with *request_desc set
848 * to 0. If the response from f/w has *req_desc set to 0, all the switch
849 * configuration information has been returned; if non-zero (meaning not all
850 * the information was returned), the caller should call this function again
851 * with *req_desc set to the previous value returned by f/w to get the
852 * next block of switch configuration information.
854 * *num_elems is output only parameter. This reflects the number of elements
855 * in response buffer. The caller of this function to use *num_elems while
856 * parsing the response buffer.
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* continuation cookie: 0 on the first call, the value FW previously
	 * returned on subsequent calls (see function header above)
	 */
	cmd->element = CPU_TO_LE16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* hand FW's continuation cookie and element count back to the caller */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
881 * ice_alloc_sw - allocate resources specific to switch
882 * @hw: pointer to the HW struct
883 * @ena_stats: true to turn on VEB stats
884 * @shared_res: true for shared resource, false for dedicated resource
885 * @sw_id: switch ID returned
886 * @counter_id: VEB counter ID returned
888 * allocates switch resources (SWID and VEB counter) (0x0208)
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* allocation-failure path */
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* SWID resource type, marked shared or dedicated per caller request */
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			     ICE_AQC_RES_TYPE_FLAG_DEDICATED));

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);

		goto ice_alloc_sw_exit;

	/* return the FW-assigned switch ID to the caller */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);

	/* Prepare buffer for VEB Counter */
	enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
	struct ice_aqc_alloc_free_res_elem *counter_buf;
	struct ice_aqc_res_elem *counter_ele;

	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* allocation-failure path for the counter request buffer */
		status = ICE_ERR_NO_MEMORY;
		goto ice_alloc_sw_exit;

	/* The number of resource entries in buffer is passed as 1 since
	 * only a single switch/VEB instance is allocated, and hence a
	 * single VEB counter is requested.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type =
		CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
	/* on failure, release the counter buffer before bailing out */
		ice_free(hw, counter_buf);
		goto ice_alloc_sw_exit;

	/* return the FW-assigned VEB counter ID to the caller */
	counter_ele = &counter_buf->elem[0];
	*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
	ice_free(hw, counter_buf);

	ice_free(hw, sw_buf);
964 * ice_free_sw - free resources specific to switch
965 * @hw: pointer to the HW struct
966 * @sw_id: switch ID returned
967 * @counter_id: VEB counter ID returned
969 * free switch resources (SWID and VEB counter) (0x0209)
971 * NOTE: This function frees multiple resources. It continues
972 * releasing other resources even after it encounters error.
973 * The error code returned is the last error it encountered.
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* allocation-failure path */
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	/* keep the first error but continue freeing (see header comment) */
	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	/* allocation-failure path: release the first buffer before returning */
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	/* remember the last error encountered, per the function contract */
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
		ret_status = status;

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
1033 * @hw: pointer to the HW struct
1034 * @vsi_ctx: pointer to a VSI context struct
1035 * @cd: pointer to command details structure or NULL
1037 * Add a VSI context to the hardware (0x0210)
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* a caller-chosen VSI number is only handed to FW when the VSI is not
	 * being allocated from the FW pool
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);

	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);

	/* command carries an indirect data buffer (vsi_ctx->info) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	/* capture the FW-assigned VSI number and current allocation counts */
	vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1075 * @hw: pointer to the HW struct
1076 * @vsi_ctx: pointer to a VSI context struct
1077 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1078 * @cd: pointer to command details structure or NULL
1080 * Free VSI context info from hardware (0x0213)
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* NOTE(review): this flag should presumably only be set when
	 * keep_vsi_alloc is requested -- the gating "if" is not visible in
	 * this excerpt; confirm
	 */
	cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* FW reports the current VSI allocation counts in the response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1111 * @hw: pointer to the HW struct
1112 * @vsi_ctx: pointer to a VSI context struct
1113 * @cd: pointer to command details structure or NULL
1115 * Update VSI context in the hardware (0x0211)
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* command carries an indirect data buffer (vsi_ctx->info) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	/* FW reports the current VSI allocation counts in the response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1147 * ice_is_vsi_valid - check whether the VSI is valid or not
1148 * @hw: pointer to the HW struct
1149 * @vsi_handle: VSI handle
1151 * check whether the VSI is valid or not
1153 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1155 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1159 * ice_get_hw_vsi_num - return the HW VSI number
1160 * @hw: pointer to the HW struct
1161 * @vsi_handle: VSI handle
1163 * return the HW VSI number
1164 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1166 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1168 return hw->vsi_ctx[vsi_handle]->vsi_num;
1172 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1173 * @hw: pointer to the HW struct
1174 * @vsi_handle: VSI handle
1176 * return the VSI context entry for a given VSI handle
1178 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1180 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1184 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1185 * @hw: pointer to the HW struct
1186 * @vsi_handle: VSI handle
1187 * @vsi: VSI context pointer
1189 * save the VSI context entry for a given VSI handle
1192 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1194 hw->vsi_ctx[vsi_handle] = vsi;
1198 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1199 * @hw: pointer to the HW struct
1200 * @vsi_handle: VSI handle
1202 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1204 struct ice_vsi_ctx *vsi;
1207 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1210 ice_for_each_traffic_class(i) {
1211 if (vsi->lan_q_ctx[i]) {
1212 ice_free(hw, vsi->lan_q_ctx[i]);
1213 vsi->lan_q_ctx[i] = NULL;
1219 * ice_clear_vsi_ctx - clear the VSI context entry
1220 * @hw: pointer to the HW struct
1221 * @vsi_handle: VSI handle
1223 * clear the VSI context entry
1225 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1227 struct ice_vsi_ctx *vsi;
1229 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1231 ice_clear_vsi_q_ctx(hw, vsi_handle);
1233 hw->vsi_ctx[vsi_handle] = NULL;
1238 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1239 * @hw: pointer to the HW struct
1241 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1245 for (i = 0; i < ICE_MAX_VSI; i++)
1246 ice_clear_vsi_ctx(hw, i);
1250 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1251 * @hw: pointer to the HW struct
1252 * @vsi_handle: unique VSI handle provided by drivers
1253 * @vsi_ctx: pointer to a VSI context struct
1254 * @cd: pointer to command details structure or NULL
1256 * Add a VSI context to the hardware also add it into the VSI handle list.
1257 * If this function gets called after reset for existing VSIs then update
1258 * with the new HW VSI number in the corresponding VSI handle list entry.
1261 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1262 struct ice_sq_cd *cd)
1264 struct ice_vsi_ctx *tmp_vsi_ctx;
1265 enum ice_status status;
1267 if (vsi_handle >= ICE_MAX_VSI)
1268 return ICE_ERR_PARAM;
1269 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1272 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1274 /* Create a new VSI context */
1275 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1276 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1278 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1279 return ICE_ERR_NO_MEMORY;
1281 *tmp_vsi_ctx = *vsi_ctx;
1283 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1285 /* update with new HW VSI num */
1286 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1293 * ice_free_vsi- free VSI context from hardware and VSI handle list
1294 * @hw: pointer to the HW struct
1295 * @vsi_handle: unique VSI handle
1296 * @vsi_ctx: pointer to a VSI context struct
1297 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1298 * @cd: pointer to command details structure or NULL
1300 * Free VSI context info from hardware as well as from VSI handle list
1303 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1304 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1306 enum ice_status status;
1308 if (!ice_is_vsi_valid(hw, vsi_handle))
1309 return ICE_ERR_PARAM;
1310 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1311 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1313 ice_clear_vsi_ctx(hw, vsi_handle);
1319 * @hw: pointer to the HW struct
1320 * @vsi_handle: unique VSI handle
1321 * @vsi_ctx: pointer to a VSI context struct
1322 * @cd: pointer to command details structure or NULL
1324 * Update VSI context in the hardware
1327 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1328 struct ice_sq_cd *cd)
1330 if (!ice_is_vsi_valid(hw, vsi_handle))
1331 return ICE_ERR_PARAM;
1332 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1333 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1337 * ice_aq_get_vsi_params
1338 * @hw: pointer to the HW struct
1339 * @vsi_ctx: pointer to a VSI context struct
1340 * @cd: pointer to command details structure or NULL
1342 * Get VSI context info from hardware (0x0212)
1345 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1346 struct ice_sq_cd *cd)
1348 struct ice_aqc_add_get_update_free_vsi *cmd;
1349 struct ice_aqc_get_vsi_resp *resp;
1350 struct ice_aq_desc desc;
1351 enum ice_status status;
1353 cmd = &desc.params.vsi_cmd;
1354 resp = &desc.params.get_vsi_resp;
1356 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1358 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1360 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1361 sizeof(vsi_ctx->info), cd);
1363 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1365 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1366 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1373 * ice_aq_add_update_mir_rule - add/update a mirror rule
1374 * @hw: pointer to the HW struct
1375 * @rule_type: Rule Type
1376 * @dest_vsi: VSI number to which packets will be mirrored
1377 * @count: length of the list
1378 * @mr_buf: buffer for list of mirrored VSI numbers
1379 * @cd: pointer to command details structure or NULL
1382 * Add/Update Mirror Rule (0x260).
1385 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1386 u16 count, struct ice_mir_rule_buf *mr_buf,
1387 struct ice_sq_cd *cd, u16 *rule_id)
1389 struct ice_aqc_add_update_mir_rule *cmd;
1390 struct ice_aq_desc desc;
1391 enum ice_status status;
1392 __le16 *mr_list = NULL;
1395 switch (rule_type) {
1396 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1397 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1398 /* Make sure count and mr_buf are set for these rule_types */
1399 if (!(count && mr_buf))
1400 return ICE_ERR_PARAM;
1402 buf_size = count * sizeof(__le16);
1403 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1405 return ICE_ERR_NO_MEMORY;
1407 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1408 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1409 /* Make sure count and mr_buf are not set for these
1412 if (count || mr_buf)
1413 return ICE_ERR_PARAM;
1416 ice_debug(hw, ICE_DBG_SW,
1417 "Error due to unsupported rule_type %u\n", rule_type);
1418 return ICE_ERR_OUT_OF_RANGE;
1421 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1423 /* Pre-process 'mr_buf' items for add/update of virtual port
1424 * ingress/egress mirroring (but not physical port ingress/egress
1430 for (i = 0; i < count; i++) {
1433 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1435 /* Validate specified VSI number, make sure it is less
1436 * than ICE_MAX_VSI, if not return with error.
1438 if (id >= ICE_MAX_VSI) {
1439 ice_debug(hw, ICE_DBG_SW,
1440 "Error VSI index (%u) out-of-range\n",
1442 ice_free(hw, mr_list);
1443 return ICE_ERR_OUT_OF_RANGE;
1446 /* add VSI to mirror rule */
1449 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1450 else /* remove VSI from mirror rule */
1451 mr_list[i] = CPU_TO_LE16(id);
1455 cmd = &desc.params.add_update_rule;
1456 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1457 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1458 ICE_AQC_RULE_ID_VALID_M);
1459 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1460 cmd->num_entries = CPU_TO_LE16(count);
1461 cmd->dest = CPU_TO_LE16(dest_vsi);
1463 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1465 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1467 ice_free(hw, mr_list);
1473 * ice_aq_delete_mir_rule - delete a mirror rule
1474 * @hw: pointer to the HW struct
1475 * @rule_id: Mirror rule ID (to be deleted)
1476 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1477 * otherwise it is returned to the shared pool
1478 * @cd: pointer to command details structure or NULL
1480 * Delete Mirror Rule (0x261).
1483 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1484 struct ice_sq_cd *cd)
1486 struct ice_aqc_delete_mir_rule *cmd;
1487 struct ice_aq_desc desc;
1489 /* rule_id should be in the range 0...63 */
1490 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1491 return ICE_ERR_OUT_OF_RANGE;
1493 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1495 cmd = &desc.params.del_rule;
1496 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1497 cmd->rule_id = CPU_TO_LE16(rule_id);
1500 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1502 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1506 * ice_aq_alloc_free_vsi_list
1507 * @hw: pointer to the HW struct
1508 * @vsi_list_id: VSI list ID returned or used for lookup
1509 * @lkup_type: switch rule filter lookup type
1510 * @opc: switch rules population command type - pass in the command opcode
1512 * allocates or free a VSI list resource
1514 static enum ice_status
1515 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1516 enum ice_sw_lkup_type lkup_type,
1517 enum ice_adminq_opc opc)
1519 struct ice_aqc_alloc_free_res_elem *sw_buf;
1520 struct ice_aqc_res_elem *vsi_ele;
1521 enum ice_status status;
1524 buf_len = sizeof(*sw_buf);
1525 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1526 ice_malloc(hw, buf_len);
1528 return ICE_ERR_NO_MEMORY;
1529 sw_buf->num_elems = CPU_TO_LE16(1);
1531 if (lkup_type == ICE_SW_LKUP_MAC ||
1532 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1533 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1534 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1535 lkup_type == ICE_SW_LKUP_PROMISC ||
1536 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1537 lkup_type == ICE_SW_LKUP_LAST) {
1538 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1539 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1541 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1543 status = ICE_ERR_PARAM;
1544 goto ice_aq_alloc_free_vsi_list_exit;
1547 if (opc == ice_aqc_opc_free_res)
1548 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1550 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1552 goto ice_aq_alloc_free_vsi_list_exit;
1554 if (opc == ice_aqc_opc_alloc_res) {
1555 vsi_ele = &sw_buf->elem[0];
1556 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1559 ice_aq_alloc_free_vsi_list_exit:
1560 ice_free(hw, sw_buf);
1565 * ice_aq_set_storm_ctrl - Sets storm control configuration
1566 * @hw: pointer to the HW struct
1567 * @bcast_thresh: represents the upper threshold for broadcast storm control
1568 * @mcast_thresh: represents the upper threshold for multicast storm control
1569 * @ctl_bitmask: storm control control knobs
1571 * Sets the storm control configuration (0x0280)
1574 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1577 struct ice_aqc_storm_cfg *cmd;
1578 struct ice_aq_desc desc;
1580 cmd = &desc.params.storm_conf;
1582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1584 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1585 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1586 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1588 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1592 * ice_aq_get_storm_ctrl - gets storm control configuration
1593 * @hw: pointer to the HW struct
1594 * @bcast_thresh: represents the upper threshold for broadcast storm control
1595 * @mcast_thresh: represents the upper threshold for multicast storm control
1596 * @ctl_bitmask: storm control control knobs
1598 * Gets the storm control configuration (0x0281)
1601 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1604 enum ice_status status;
1605 struct ice_aq_desc desc;
1607 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1609 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1611 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1614 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1617 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1620 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1627 * ice_aq_sw_rules - add/update/remove switch rules
1628 * @hw: pointer to the HW struct
1629 * @rule_list: pointer to switch rule population list
1630 * @rule_list_sz: total size of the rule list in bytes
1631 * @num_rules: number of switch rules in the rule_list
1632 * @opc: switch rules population command type - pass in the command opcode
1633 * @cd: pointer to command details structure or NULL
1635 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1637 static enum ice_status
1638 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1639 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1641 struct ice_aq_desc desc;
1643 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1645 if (opc != ice_aqc_opc_add_sw_rules &&
1646 opc != ice_aqc_opc_update_sw_rules &&
1647 opc != ice_aqc_opc_remove_sw_rules)
1648 return ICE_ERR_PARAM;
1650 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1652 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1653 desc.params.sw_rules.num_rules_fltr_entry_index =
1654 CPU_TO_LE16(num_rules);
1655 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1659 * ice_aq_add_recipe - add switch recipe
1660 * @hw: pointer to the HW struct
1661 * @s_recipe_list: pointer to switch rule population list
1662 * @num_recipes: number of switch recipes in the list
1663 * @cd: pointer to command details structure or NULL
1668 ice_aq_add_recipe(struct ice_hw *hw,
1669 struct ice_aqc_recipe_data_elem *s_recipe_list,
1670 u16 num_recipes, struct ice_sq_cd *cd)
1672 struct ice_aqc_add_get_recipe *cmd;
1673 struct ice_aq_desc desc;
1676 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1677 cmd = &desc.params.add_get_recipe;
1678 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1680 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1681 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1683 buf_size = num_recipes * sizeof(*s_recipe_list);
1685 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1689 * ice_aq_get_recipe - get switch recipe
1690 * @hw: pointer to the HW struct
1691 * @s_recipe_list: pointer to switch rule population list
1692 * @num_recipes: pointer to the number of recipes (input and output)
1693 * @recipe_root: root recipe number of recipe(s) to retrieve
1694 * @cd: pointer to command details structure or NULL
1698 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1699 * On output, *num_recipes will equal the number of entries returned in
1702 * The caller must supply enough space in s_recipe_list to hold all possible
1703 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1706 ice_aq_get_recipe(struct ice_hw *hw,
1707 struct ice_aqc_recipe_data_elem *s_recipe_list,
1708 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1710 struct ice_aqc_add_get_recipe *cmd;
1711 struct ice_aq_desc desc;
1712 enum ice_status status;
1715 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1716 return ICE_ERR_PARAM;
1718 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1719 cmd = &desc.params.add_get_recipe;
1720 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1722 cmd->return_index = CPU_TO_LE16(recipe_root);
1723 cmd->num_sub_recipes = 0;
1725 buf_size = *num_recipes * sizeof(*s_recipe_list);
1727 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1728 /* cppcheck-suppress constArgument */
1729 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1735 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1736 * @hw: pointer to the HW struct
1737 * @profile_id: package profile ID to associate the recipe with
1738 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1739 * @cd: pointer to command details structure or NULL
1740 * Recipe to profile association (0x0291)
1743 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1744 struct ice_sq_cd *cd)
1746 struct ice_aqc_recipe_to_profile *cmd;
1747 struct ice_aq_desc desc;
1749 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1750 cmd = &desc.params.recipe_to_profile;
1751 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1752 cmd->profile_id = CPU_TO_LE16(profile_id);
1753 /* Set the recipe ID bit in the bitmask to let the device know which
1754 * profile we are associating the recipe to
1756 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1757 ICE_NONDMA_TO_NONDMA);
1759 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1763 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1764 * @hw: pointer to the HW struct
1765 * @profile_id: package profile ID to associate the recipe with
1766 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1767 * @cd: pointer to command details structure or NULL
1768 * Associate profile ID with given recipe (0x0293)
1771 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1772 struct ice_sq_cd *cd)
1774 struct ice_aqc_recipe_to_profile *cmd;
1775 struct ice_aq_desc desc;
1776 enum ice_status status;
1778 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1779 cmd = &desc.params.recipe_to_profile;
1780 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1781 cmd->profile_id = CPU_TO_LE16(profile_id);
1783 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1785 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1786 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1792 * ice_alloc_recipe - add recipe resource
1793 * @hw: pointer to the hardware structure
1794 * @rid: recipe ID returned as response to AQ call
1796 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1798 struct ice_aqc_alloc_free_res_elem *sw_buf;
1799 enum ice_status status;
1802 buf_len = sizeof(*sw_buf);
1803 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1805 return ICE_ERR_NO_MEMORY;
1807 sw_buf->num_elems = CPU_TO_LE16(1);
1808 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1809 ICE_AQC_RES_TYPE_S) |
1810 ICE_AQC_RES_TYPE_FLAG_SHARED);
1811 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1812 ice_aqc_opc_alloc_res, NULL);
1814 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1815 ice_free(hw, sw_buf);
1820 /* ice_init_port_info - Initialize port_info with switch configuration data
1821 * @pi: pointer to port_info
1822 * @vsi_port_num: VSI number or port number
1823 * @type: Type of switch element (port or VSI)
1824 * @swid: switch ID of the switch the element is attached to
1825 * @pf_vf_num: PF or VF number
1826 * @is_vf: true if the element is a VF, false otherwise
1829 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1830 u16 swid, u16 pf_vf_num, bool is_vf)
1833 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1834 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1836 pi->pf_vf_num = pf_vf_num;
1838 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1839 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1842 ice_debug(pi->hw, ICE_DBG_SW,
1843 "incorrect VSI/port type received\n");
1848 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1849 * @hw: pointer to the hardware structure
1851 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1853 struct ice_aqc_get_sw_cfg_resp *rbuf;
1854 enum ice_status status;
1855 u16 num_total_ports;
1861 num_total_ports = 1;
1863 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1864 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1867 return ICE_ERR_NO_MEMORY;
1869 /* Multiple calls to ice_aq_get_sw_cfg may be required
1870 * to get all the switch configuration information. The need
1871 * for additional calls is indicated by ice_aq_get_sw_cfg
1872 * writing a non-zero value in req_desc
1875 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1876 &req_desc, &num_elems, NULL);
1881 for (i = 0; i < num_elems; i++) {
1882 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1883 u16 pf_vf_num, swid, vsi_port_num;
1887 ele = rbuf[i].elements;
1888 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1889 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1891 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1892 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1894 swid = LE16_TO_CPU(ele->swid);
1896 if (LE16_TO_CPU(ele->pf_vf_num) &
1897 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1900 type = LE16_TO_CPU(ele->vsi_port_num) >>
1901 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1904 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1905 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1906 if (j == num_total_ports) {
1907 ice_debug(hw, ICE_DBG_SW,
1908 "more ports than expected\n");
1909 status = ICE_ERR_CFG;
1912 ice_init_port_info(hw->port_info,
1913 vsi_port_num, type, swid,
1921 } while (req_desc && !status);
1924 ice_free(hw, (void *)rbuf);
1929 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1930 * @hw: pointer to the hardware structure
1931 * @fi: filter info structure to fill/update
1933 * This helper function populates the lb_en and lan_en elements of the provided
1934 * ice_fltr_info struct using the switch's type and characteristics of the
1935 * switch rule being configured.
1937 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1941 if ((fi->flag & ICE_FLTR_TX) &&
1942 (fi->fltr_act == ICE_FWD_TO_VSI ||
1943 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1944 fi->fltr_act == ICE_FWD_TO_Q ||
1945 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1946 /* Setting LB for prune actions will result in replicated
1947 * packets to the internal switch that will be dropped.
1949 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1952 /* Set lan_en to TRUE if
1953 * 1. The switch is a VEB AND
1955 * 2.1 The lookup is a directional lookup like ethertype,
1956 * promiscuous, ethertype-MAC, promiscuous-VLAN
1957 * and default-port OR
1958 * 2.2 The lookup is VLAN, OR
1959 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1960 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1964 * The switch is a VEPA.
1966 * In all other cases, the LAN enable has to be set to false.
1969 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1970 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1971 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1972 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1973 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1974 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1975 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1976 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1977 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1978 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1987 * ice_fill_sw_rule - Helper function to fill switch rule structure
1988 * @hw: pointer to the hardware structure
1989 * @f_info: entry containing packet forwarding information
1990 * @s_rule: switch rule structure to be filled in based on mac_entry
1991 * @opc: switch rules population command type - pass in the command opcode
1994 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1995 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1997 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2005 if (opc == ice_aqc_opc_remove_sw_rules) {
2006 s_rule->pdata.lkup_tx_rx.act = 0;
2007 s_rule->pdata.lkup_tx_rx.index =
2008 CPU_TO_LE16(f_info->fltr_rule_id);
2009 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2013 eth_hdr_sz = sizeof(dummy_eth_header);
2014 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2016 /* initialize the ether header with a dummy header */
2017 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2018 ice_fill_sw_info(hw, f_info);
2020 switch (f_info->fltr_act) {
2021 case ICE_FWD_TO_VSI:
2022 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2023 ICE_SINGLE_ACT_VSI_ID_M;
2024 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2025 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2026 ICE_SINGLE_ACT_VALID_BIT;
2028 case ICE_FWD_TO_VSI_LIST:
2029 act |= ICE_SINGLE_ACT_VSI_LIST;
2030 act |= (f_info->fwd_id.vsi_list_id <<
2031 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2032 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2033 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2034 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2035 ICE_SINGLE_ACT_VALID_BIT;
2038 act |= ICE_SINGLE_ACT_TO_Q;
2039 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2040 ICE_SINGLE_ACT_Q_INDEX_M;
2042 case ICE_DROP_PACKET:
2043 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2044 ICE_SINGLE_ACT_VALID_BIT;
2046 case ICE_FWD_TO_QGRP:
2047 q_rgn = f_info->qgrp_size > 0 ?
2048 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2049 act |= ICE_SINGLE_ACT_TO_Q;
2050 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2051 ICE_SINGLE_ACT_Q_INDEX_M;
2052 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2053 ICE_SINGLE_ACT_Q_REGION_M;
2060 act |= ICE_SINGLE_ACT_LB_ENABLE;
2062 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2064 switch (f_info->lkup_type) {
2065 case ICE_SW_LKUP_MAC:
2066 daddr = f_info->l_data.mac.mac_addr;
2068 case ICE_SW_LKUP_VLAN:
2069 vlan_id = f_info->l_data.vlan.vlan_id;
2070 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2071 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2072 act |= ICE_SINGLE_ACT_PRUNE;
2073 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2076 case ICE_SW_LKUP_ETHERTYPE_MAC:
2077 daddr = f_info->l_data.ethertype_mac.mac_addr;
2079 case ICE_SW_LKUP_ETHERTYPE:
2080 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2081 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2083 case ICE_SW_LKUP_MAC_VLAN:
2084 daddr = f_info->l_data.mac_vlan.mac_addr;
2085 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2087 case ICE_SW_LKUP_PROMISC_VLAN:
2088 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2090 case ICE_SW_LKUP_PROMISC:
2091 daddr = f_info->l_data.mac_vlan.mac_addr;
2097 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2098 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2099 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2101 /* Recipe set depending on lookup type */
2102 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2103 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2104 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2107 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2108 ICE_NONDMA_TO_NONDMA);
2110 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2111 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2112 *off = CPU_TO_BE16(vlan_id);
2115 /* Create the switch rule with the final dummy Ethernet header */
2116 if (opc != ice_aqc_opc_update_sw_rules)
2117 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2121 * ice_add_marker_act
2122 * @hw: pointer to the hardware structure
2123 * @m_ent: the management entry for which sw marker needs to be added
2124 * @sw_marker: sw marker to tag the Rx descriptor with
2125 * @l_id: large action resource ID
2127 * Create a large action to hold software marker and update the switch rule
2128 * entry pointed by m_ent with newly created large action
2130 static enum ice_status
2131 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2132 u16 sw_marker, u16 l_id)
2134 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2135 /* For software marker we need 3 large actions
2136 * 1. FWD action: FWD TO VSI or VSI LIST
2137 * 2. GENERIC VALUE action to hold the profile ID
2138 * 3. GENERIC VALUE action to hold the software marker ID
2140 const u16 num_lg_acts = 3;
2141 enum ice_status status;
2147 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2148 return ICE_ERR_PARAM;
2150 /* Create two back-to-back switch rules and submit them to the HW using
2151 * one memory buffer:
2155 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2156 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2157 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2159 return ICE_ERR_NO_MEMORY;
2161 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2163 /* Fill in the first switch rule i.e. large action */
2164 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2165 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2166 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2168 /* First action VSI forwarding or VSI list forwarding depending on how
2171 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2172 m_ent->fltr_info.fwd_id.hw_vsi_id;
2174 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2175 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2176 ICE_LG_ACT_VSI_LIST_ID_M;
2177 if (m_ent->vsi_count > 1)
2178 act |= ICE_LG_ACT_VSI_LIST;
2179 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2181 /* Second action descriptor type */
2182 act = ICE_LG_ACT_GENERIC;
2184 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2185 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2187 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2188 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2190 /* Third action Marker value */
2191 act |= ICE_LG_ACT_GENERIC;
2192 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2193 ICE_LG_ACT_GENERIC_VALUE_M;
2195 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2197 /* call the fill switch rule to fill the lookup Tx Rx structure */
2198 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2199 ice_aqc_opc_update_sw_rules);
2201 /* Update the action to point to the large action ID */
2202 rx_tx->pdata.lkup_tx_rx.act =
2203 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2204 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2205 ICE_SINGLE_ACT_PTR_VAL_M));
2207 /* Use the filter rule ID of the previously created rule with single
2208 * act. Once the update happens, hardware will treat this as large
2211 rx_tx->pdata.lkup_tx_rx.index =
2212 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2214 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2215 ice_aqc_opc_update_sw_rules, NULL);
2217 m_ent->lg_act_idx = l_id;
2218 m_ent->sw_marker_id = sw_marker;
2221 ice_free(hw, lg_act);
2226 * ice_add_counter_act - add/update filter rule with counter action
2227 * @hw: pointer to the hardware structure
2228 * @m_ent: the management entry for which counter needs to be added
2229 * @counter_id: VLAN counter ID returned as part of allocate resource
2230 * @l_id: large action resource ID
/* Attach a statistics counter to an existing MAC forwarding rule by
 * chaining it to a large action.  Two back-to-back rules are built in one
 * buffer and submitted in a single AQ call:
 *   1) a large action holding a forward action plus a counter action
 *   2) an update of the original lookup rule so its single action becomes
 *      a pointer to that large action.
 * NOTE(review): several interior lines (local declarations, error checks)
 * are not visible in this view — confirm against the full source.
 */
2232 static enum ice_status
2233 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2234 u16 counter_id, u16 l_id)
2236 struct ice_aqc_sw_rules_elem *lg_act;
2237 struct ice_aqc_sw_rules_elem *rx_tx;
2238 enum ice_status status;
2239 /* 2 actions will be added while adding a large action counter */
2240 const int num_acts = 2;
/* Counters are only supported on MAC lookup rules. */
2247 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2248 return ICE_ERR_PARAM;
2250 /* Create two back-to-back switch rules and submit them to the HW using
2251 * one memory buffer:
2255 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2256 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2257 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2260 return ICE_ERR_NO_MEMORY;
/* The lookup Rx/Tx rule lives immediately after the large action within
 * the same allocation.
 */
2262 rx_tx = (struct ice_aqc_sw_rules_elem *)
2263 ((u8 *)lg_act + lg_act_size);
2265 /* Fill in the first switch rule i.e. large action */
2266 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2267 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2268 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2270 /* First action VSI forwarding or VSI list forwarding depending on how
/* Forward to a VSI list when the entry covers more than one VSI,
 * otherwise to the single HW VSI.
 */
2273 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2274 m_ent->fltr_info.fwd_id.hw_vsi_id;
2276 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2277 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2278 ICE_LG_ACT_VSI_LIST_ID_M;
2279 if (m_ent->vsi_count > 1)
2280 act |= ICE_LG_ACT_VSI_LIST;
2281 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2283 /* Second action counter ID */
2284 act = ICE_LG_ACT_STAT_COUNT;
2285 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2286 ICE_LG_ACT_STAT_COUNT_M;
2287 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2289 /* call the fill switch rule to fill the lookup Tx Rx structure */
2290 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2291 ice_aqc_opc_update_sw_rules);
/* Replace the lookup rule's single action with a pointer to the large
 * action at index l_id.
 */
2293 act = ICE_SINGLE_ACT_PTR;
2294 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2295 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2297 /* Use the filter rule ID of the previously created rule with single
2298 * act. Once the update happens, hardware will treat this as large
2301 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2302 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call. */
2304 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2305 ice_aqc_opc_update_sw_rules, NULL);
/* Remember the large-action index and counter for later teardown. */
2307 m_ent->lg_act_idx = l_id;
2308 m_ent->counter_index = counter_id;
2311 ice_free(hw, lg_act);
2316 * ice_create_vsi_list_map
2317 * @hw: pointer to the hardware structure
2318 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2319 * @num_vsi: number of VSI handles in the array
2320 * @vsi_list_id: VSI list ID generated as part of allocate resource
2322 * Helper function to create a new entry of VSI list ID to VSI mapping
2323 * using the given VSI list ID
/* Allocate a vsi_list_id -> VSI-handle bitmap tracking entry and link it
 * onto the switch info's vsi_list_map_head list.  Returns the new map
 * entry (allocation-failure path not visible in this view).
 */
2325 static struct ice_vsi_list_map_info *
2326 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2329 struct ice_switch_info *sw = hw->switch_info;
2330 struct ice_vsi_list_map_info *v_map;
2333 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2338 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap. */
2340 for (i = 0; i < num_vsi; i++)
2341 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2343 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2348 * ice_update_vsi_list_rule
2349 * @hw: pointer to the hardware structure
2350 * @vsi_handle_arr: array of VSI handles to form a VSI list
2351 * @num_vsi: number of VSI handles in the array
2352 * @vsi_list_id: VSI list ID generated as part of allocate resource
2353 * @remove: Boolean value to indicate if this is a remove action
2354 * @opc: switch rules population command type - pass in the command opcode
2355 * @lkup_type: lookup type of the filter
2357 * Call AQ command to add a new switch rule or update existing switch rule
2358 * using the given VSI list ID
/* Build a VSI-list switch rule (set/clear membership) for the given list
 * ID and submit it via one AQ call.  MAC/ethertype/promisc lookup types
 * use the VSI-list rule variants; VLAN uses the prune-list variants.
 */
2360 static enum ice_status
2361 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2362 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2363 enum ice_sw_lkup_type lkup_type)
2365 struct ice_aqc_sw_rules_elem *s_rule;
2366 enum ice_status status;
2372 return ICE_ERR_PARAM;
/* Map the lookup type to the corresponding rule element type:
 * most lookup types manage plain VSI-list membership, VLAN manages
 * the pruning list instead.
 */
2374 if (lkup_type == ICE_SW_LKUP_MAC ||
2375 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2376 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2377 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2378 lkup_type == ICE_SW_LKUP_PROMISC ||
2379 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2380 lkup_type == ICE_SW_LKUP_LAST)
2381 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2382 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2383 else if (lkup_type == ICE_SW_LKUP_VLAN)
2384 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2385 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2387 return ICE_ERR_PARAM;
2389 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2390 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2392 return ICE_ERR_NO_MEMORY;
/* Translate every software VSI handle to its HW VSI number before
 * handing the list to firmware.
 */
2393 for (i = 0; i < num_vsi; i++) {
2394 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2395 status = ICE_ERR_PARAM;
2398 /* AQ call requires hw_vsi_id(s) */
2399 s_rule->pdata.vsi_list.vsi[i] =
2400 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2403 s_rule->type = CPU_TO_LE16(type);
2404 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2405 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2407 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2410 ice_free(hw, s_rule);
2415 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2416 * @hw: pointer to the HW struct
2417 * @vsi_handle_arr: array of VSI handles to form a VSI list
2418 * @num_vsi: number of VSI handles in the array
2419 * @vsi_list_id: stores the ID of the VSI list to be created
2420 * @lkup_type: switch rule filter's lookup type
/* Allocate a new VSI list resource from firmware (ID returned through
 * vsi_list_id) and then populate it with the given VSI handles.
 */
2422 static enum ice_status
2423 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2424 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2426 enum ice_status status;
2428 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2429 ice_aqc_opc_alloc_res);
2433 /* Update the newly created VSI list to include the specified VSIs */
2434 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2435 *vsi_list_id, false,
2436 ice_aqc_opc_add_sw_rules, lkup_type);
2440 * ice_create_pkt_fwd_rule
2441 * @hw: pointer to the hardware structure
2442 * @f_entry: entry containing packet forwarding information
2444 * Create switch rule with given filter information and add an entry
2445 * to the corresponding filter management list to track this switch rule
/* Create one packet-forwarding switch rule in HW from f_entry's filter
 * info and add a matching management entry to the recipe's filt_rules
 * list so the driver can track (and later remove) the rule.
 */
2448 static enum ice_status
2449 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2450 struct ice_fltr_list_entry *f_entry)
2452 struct ice_fltr_mgmt_list_entry *fm_entry;
2453 struct ice_aqc_sw_rules_elem *s_rule;
2454 enum ice_sw_lkup_type l_type;
2455 struct ice_sw_recipe *recp;
2456 enum ice_status status;
2458 s_rule = (struct ice_aqc_sw_rules_elem *)
2459 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2461 return ICE_ERR_NO_MEMORY;
2462 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2463 ice_malloc(hw, sizeof(*fm_entry));
2465 status = ICE_ERR_NO_MEMORY;
2466 goto ice_create_pkt_fwd_rule_exit;
2469 fm_entry->fltr_info = f_entry->fltr_info;
2471 /* Initialize all the fields for the management entry */
2472 fm_entry->vsi_count = 1;
2473 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2474 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2475 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2477 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2478 ice_aqc_opc_add_sw_rules);
2480 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2481 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is freed (visible path);
 * confirm the guarding condition in the full source.
 */
2483 ice_free(hw, fm_entry);
2484 goto ice_create_pkt_fwd_rule_exit;
/* FW assigns the rule ID; mirror it into both the caller's entry and
 * the book-keeping entry.
 */
2487 f_entry->fltr_info.fltr_rule_id =
2488 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2489 fm_entry->fltr_info.fltr_rule_id =
2490 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2492 /* The book keeping entries will get removed when base driver
2493 * calls remove filter AQ command
2495 l_type = fm_entry->fltr_info.lkup_type;
2496 recp = &hw->switch_info->recp_list[l_type];
2497 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2499 ice_create_pkt_fwd_rule_exit:
2500 ice_free(hw, s_rule);
2505 * ice_update_pkt_fwd_rule
2506 * @hw: pointer to the hardware structure
2507 * @f_info: filter information for switch rule
2509 * Call AQ command to update a previously created switch rule with a
/* Re-submit an existing switch rule (identified by f_info->fltr_rule_id)
 * with updated filter information via the update_sw_rules AQ opcode.
 */
2512 static enum ice_status
2513 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2515 struct ice_aqc_sw_rules_elem *s_rule;
2516 enum ice_status status;
2518 s_rule = (struct ice_aqc_sw_rules_elem *)
2519 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2521 return ICE_ERR_NO_MEMORY;
2523 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule rather than creating a new one. */
2525 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2527 /* Update switch rule with new rule set to forward VSI list */
2528 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2529 ice_aqc_opc_update_sw_rules, NULL);
2531 ice_free(hw, s_rule);
2536 * ice_update_sw_rule_bridge_mode
2537 * @hw: pointer to the HW struct
2539 * Updates unicast switch filter rules based on VEB/VEPA mode
/* Walk all tracked MAC filter rules and re-program the unicast Tx
 * forwarding ones so they reflect the current bridge (VEB/VEPA) mode.
 * Holds the MAC recipe's rule lock for the whole traversal.
 */
2541 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2543 struct ice_switch_info *sw = hw->switch_info;
2544 struct ice_fltr_mgmt_list_entry *fm_entry;
2545 enum ice_status status = ICE_SUCCESS;
2546 struct LIST_HEAD_TYPE *rule_head;
2547 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2549 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2550 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2552 ice_acquire_lock(rule_lock);
2553 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2555 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2556 u8 *addr = fi->l_data.mac.mac_addr;
2558 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action need
 * re-programming.
 */
2561 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2562 (fi->fltr_act == ICE_FWD_TO_VSI ||
2563 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2564 fi->fltr_act == ICE_FWD_TO_Q ||
2565 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2566 status = ice_update_pkt_fwd_rule(hw, fi);
2572 ice_release_lock(rule_lock);
2578 * ice_add_update_vsi_list
2579 * @hw: pointer to the hardware structure
2580 * @m_entry: pointer to current filter management list entry
2581 * @cur_fltr: filter information from the book keeping entry
2582 * @new_fltr: filter information with the new VSI to be added
2584 * Call AQ command to add or update previously created VSI list with new VSI.
2586 * Helper function to do book keeping associated with adding filter information
2587 * The algorithm to do the book keeping is described below :
2588 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2589 * if only one VSI has been added till now
2590 * Allocate a new VSI list and add two VSIs
2591 * to this list using switch rule command
2592 * Update the previously created switch rule with the
2593 * newly created VSI list ID
2594 * if a VSI list was previously created
2595 * Add the new VSI to the previously created VSI list set
2596 * using the update switch rule command
/* Subscribe an additional VSI to an existing filter.  If the filter
 * currently forwards to a single VSI, promote it to a two-entry VSI list
 * (create list, re-point the rule, create the tracking map); if a VSI
 * list already exists, just add the new VSI to it.
 */
2598 static enum ice_status
2599 ice_add_update_vsi_list(struct ice_hw *hw,
2600 struct ice_fltr_mgmt_list_entry *m_entry,
2601 struct ice_fltr_info *cur_fltr,
2602 struct ice_fltr_info *new_fltr)
2604 enum ice_status status = ICE_SUCCESS;
2605 u16 vsi_list_id = 0;
/* Queue / queue-group actions cannot be combined into VSI lists. */
2607 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2608 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2609 return ICE_ERR_NOT_IMPL;
2611 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2612 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2613 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2614 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2615 return ICE_ERR_NOT_IMPL;
2617 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2618 /* Only one entry existed in the mapping and it was not already
2619 * a part of a VSI list. So, create a VSI list with the old and
2622 struct ice_fltr_info tmp_fltr;
2623 u16 vsi_handle_arr[2];
2625 /* A rule already exists with the new VSI being added */
2626 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2627 return ICE_ERR_ALREADY_EXISTS;
2629 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2630 vsi_handle_arr[1] = new_fltr->vsi_handle;
2631 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2633 new_fltr->lkup_type);
2637 tmp_fltr = *new_fltr;
2638 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2639 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2640 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2641 /* Update the previous switch rule of "MAC forward to VSI" to
2642 * "MAC fwd to VSI list"
2644 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping filter in sync with the HW rule. */
2648 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2649 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2650 m_entry->vsi_list_info =
2651 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2654 /* If this entry was large action then the large action needs
2655 * to be updated to point to FWD to VSI list
2657 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2659 ice_add_marker_act(hw, m_entry,
2660 m_entry->sw_marker_id,
2661 m_entry->lg_act_idx)
2663 u16 vsi_handle = new_fltr->vsi_handle;
2664 enum ice_adminq_opc opcode;
2666 if (!m_entry->vsi_list_info)
2669 /* A rule already exists with the new VSI being added */
2670 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2673 /* Update the previously created VSI list set with
2674 * the new VSI ID passed in
2676 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2677 opcode = ice_aqc_opc_update_sw_rules;
2679 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2680 vsi_list_id, false, opcode,
2681 new_fltr->lkup_type);
2682 /* update VSI list mapping info with new VSI ID */
2684 ice_set_bit(vsi_handle,
2685 m_entry->vsi_list_info->vsi_map);
2688 m_entry->vsi_count++;
2693 * ice_find_rule_entry - Search a rule entry
2694 * @hw: pointer to the hardware structure
2695 * @recp_id: lookup type for which the specified rule needs to be searched
2696 * @f_info: rule information
2698 * Helper function to search for a given rule entry
2699 * Returns pointer to entry storing the rule if found
/* Linear search of the recipe's filt_rules list for an entry whose
 * lookup data and flag match f_info; returns the matching management
 * entry or NULL.  Caller is expected to hold the recipe's rule lock.
 */
2701 static struct ice_fltr_mgmt_list_entry *
2702 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2704 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2705 struct ice_switch_info *sw = hw->switch_info;
2706 struct LIST_HEAD_TYPE *list_head;
2708 list_head = &sw->recp_list[recp_id].filt_rules;
2709 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
/* Match on the raw lookup data (MAC/VLAN/ethertype union) plus the
 * Rx/Tx flag.
 */
2711 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2712 sizeof(f_info->l_data)) &&
2713 f_info->flag == list_itr->fltr_info.flag) {
2722 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2723 * @hw: pointer to the hardware structure
2724 * @recp_id: lookup type for which VSI lists needs to be searched
2725 * @vsi_handle: VSI handle to be found in VSI list
2726 * @vsi_list_id: VSI list ID found containing vsi_handle
2728 * Helper function to search a VSI list with single entry containing given VSI
2729 * handle element. This can be extended further to search VSI list with more
2730 * than 1 vsi_count. Returns pointer to VSI list entry if found.
/* Search the recipe's rule list for a VSI list map that contains
 * vsi_handle, returning the map and its list ID via vsi_list_id.
 * Advanced-rule recipes use a different entry type, so the traversal
 * is duplicated per entry layout.
 */
2732 static struct ice_vsi_list_map_info *
2733 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2736 struct ice_vsi_list_map_info *map_info = NULL;
2737 struct ice_switch_info *sw = hw->switch_info;
2738 struct LIST_HEAD_TYPE *list_head;
2740 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced rules keep their VSI list info in a different entry type. */
2741 if (sw->recp_list[recp_id].adv_rule) {
2742 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2744 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2745 ice_adv_fltr_mgmt_list_entry,
2747 if (list_itr->vsi_list_info) {
2748 map_info = list_itr->vsi_list_info;
2749 if (ice_is_bit_set(map_info->vsi_map,
2751 *vsi_list_id = map_info->vsi_list_id;
2757 struct ice_fltr_mgmt_list_entry *list_itr;
2759 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2760 ice_fltr_mgmt_list_entry,
/* Only lists with a single member are considered here, per the
 * function's header comment.
 */
2762 if (list_itr->vsi_count == 1 &&
2763 list_itr->vsi_list_info) {
2764 map_info = list_itr->vsi_list_info;
2765 if (ice_is_bit_set(map_info->vsi_map,
2767 *vsi_list_id = map_info->vsi_list_id;
2777 * ice_add_rule_internal - add rule for a given lookup type
2778 * @hw: pointer to the hardware structure
2779 * @recp_id: lookup type (recipe ID) for which rule has to be added
2780 * @f_entry: structure containing MAC forwarding information
2782 * Adds or updates the rule lists for a given recipe
/* Add a filter rule for the given recipe: create a brand-new forwarding
 * rule if no matching entry exists, otherwise extend the existing entry's
 * VSI list.  The recipe's rule lock is held across lookup and update.
 */
2784 static enum ice_status
2785 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2786 struct ice_fltr_list_entry *f_entry)
2788 struct ice_switch_info *sw = hw->switch_info;
2789 struct ice_fltr_info *new_fltr, *cur_fltr;
2790 struct ice_fltr_mgmt_list_entry *m_entry;
2791 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2792 enum ice_status status = ICE_SUCCESS;
2794 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2795 return ICE_ERR_PARAM;
2797 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2798 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2799 f_entry->fltr_info.fwd_id.hw_vsi_id =
2800 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle)
2802 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2804 ice_acquire_lock(rule_lock);
2805 new_fltr = &f_entry->fltr_info;
/* Source is the port for Rx rules, the HW VSI number for Tx rules. */
2806 if (new_fltr->flag & ICE_FLTR_RX)
2807 new_fltr->src = hw->port_info->lport;
2808 else if (new_fltr->flag & ICE_FLTR_TX)
2810 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2812 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2814 status = ice_create_pkt_fwd_rule(hw, f_entry);
2815 goto exit_add_rule_internal;
2818 cur_fltr = &m_entry->fltr_info;
2819 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2821 exit_add_rule_internal:
2822 ice_release_lock(rule_lock);
2827 * ice_remove_vsi_list_rule
2828 * @hw: pointer to the hardware structure
2829 * @vsi_list_id: VSI list ID generated as part of allocate resource
2830 * @lkup_type: switch rule filter lookup type
2832 * The VSI list should be emptied before this function is called to remove the
/* Tear down an (assumed empty) VSI list: issue the list-clear switch
 * rule and then free the VSI list resource back to firmware.
 */
2835 static enum ice_status
2836 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2837 enum ice_sw_lkup_type lkup_type)
2839 struct ice_aqc_sw_rules_elem *s_rule;
2840 enum ice_status status;
/* Zero-VSI list rule: header only, no member entries. */
2843 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2844 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2846 return ICE_ERR_NO_MEMORY;
2848 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2849 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2851 /* Free the vsi_list resource that we allocated. It is assumed that the
2852 * list is empty at this point.
2854 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2855 ice_aqc_opc_free_res);
2857 ice_free(hw, s_rule);
2862 * ice_rem_update_vsi_list
2863 * @hw: pointer to the hardware structure
2864 * @vsi_handle: VSI handle of the VSI to remove
2865 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove one VSI from a FWD_TO_VSI_LIST filter entry.  When only one VSI
 * remains (non-VLAN), the rule is converted back to plain FWD_TO_VSI and
 * the now-unneeded VSI list is destroyed; a fully-drained VLAN prune
 * list is likewise destroyed.
 */
2868 static enum ice_status
2869 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2870 struct ice_fltr_mgmt_list_entry *fm_list)
2872 enum ice_sw_lkup_type lkup_type;
2873 enum ice_status status = ICE_SUCCESS;
2876 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2877 fm_list->vsi_count == 0)
2878 return ICE_ERR_PARAM;
2880 /* A rule with the VSI being removed does not exist */
2881 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2882 return ICE_ERR_DOES_NOT_EXIST;
2884 lkup_type = fm_list->fltr_info.lkup_type;
2885 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove the VSI from the HW VSI list first (remove == true). */
2886 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2887 ice_aqc_opc_update_sw_rules,
2892 fm_list->vsi_count--;
2893 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* One VSI left on a non-VLAN rule: fold the list back into a direct
 * FWD_TO_VSI rule for the remaining member.
 */
2895 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2896 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2897 struct ice_vsi_list_map_info *vsi_list_info =
2898 fm_list->vsi_list_info;
2901 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2903 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2904 return ICE_ERR_OUT_OF_RANGE;
2906 /* Make sure VSI list is empty before removing it below */
2907 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2909 ice_aqc_opc_update_sw_rules,
2914 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2915 tmp_fltr_info.fwd_id.hw_vsi_id =
2916 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2917 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2918 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2920 ice_debug(hw, ICE_DBG_SW,
2921 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2922 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2926 fm_list->fltr_info = tmp_fltr_info;
/* Destroy the VSI list once no rule needs it any longer. */
2929 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2930 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2931 struct ice_vsi_list_map_info *vsi_list_info =
2932 fm_list->vsi_list_info;
2934 /* Remove the VSI list since it is no longer used */
2935 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2937 ice_debug(hw, ICE_DBG_SW,
2938 "Failed to remove VSI list %d, error %d\n",
2939 vsi_list_id, status);
2943 LIST_DEL(&vsi_list_info->list_entry);
2944 ice_free(hw, vsi_list_info);
2945 fm_list->vsi_list_info = NULL;
2952 * ice_remove_rule_internal - Remove a filter rule of a given type
2954 * @hw: pointer to the hardware structure
2955 * @recp_id: recipe ID for which the rule needs to be removed
2956 * @f_entry: rule entry containing filter information
/* Remove a filter rule for the given recipe.  Handles three cases:
 * direct-forward rules are removed outright; rules backed by a shared
 * VSI list just drop a reference; rules with a privately-owned list are
 * removed only once their VSI list drains to zero members.
 */
2958 static enum ice_status
2959 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2960 struct ice_fltr_list_entry *f_entry)
2962 struct ice_switch_info *sw = hw->switch_info;
2963 struct ice_fltr_mgmt_list_entry *list_elem;
2964 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2965 enum ice_status status = ICE_SUCCESS;
2966 bool remove_rule = false;
2969 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2970 return ICE_ERR_PARAM;
2971 f_entry->fltr_info.fwd_id.hw_vsi_id =
2972 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2974 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2975 ice_acquire_lock(rule_lock);
2976 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2978 status = ICE_ERR_DOES_NOT_EXIST;
2982 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2984 } else if (!list_elem->vsi_list_info) {
2985 status = ICE_ERR_DOES_NOT_EXIST;
2987 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2988 /* a ref_cnt > 1 indicates that the vsi_list is being
2989 * shared by multiple rules. Decrement the ref_cnt and
2990 * remove this rule, but do not modify the list, as it
2991 * is in-use by other rules.
2993 list_elem->vsi_list_info->ref_cnt--;
2996 /* a ref_cnt of 1 indicates the vsi_list is only used
2997 * by one rule. However, the original removal request is only
2998 * for a single VSI. Update the vsi_list first, and only
2999 * remove the rule if there are no further VSIs in this list.
3001 vsi_handle = f_entry->fltr_info.vsi_handle;
3002 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3005 /* if VSI count goes to zero after updating the VSI list */
3006 if (list_elem->vsi_count == 0)
3011 /* Remove the lookup rule */
3012 struct ice_aqc_sw_rules_elem *s_rule;
/* Removal needs only the rule header, not the dummy ethernet hdr. */
3014 s_rule = (struct ice_aqc_sw_rules_elem *)
3015 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3017 status = ICE_ERR_NO_MEMORY;
3021 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3022 ice_aqc_opc_remove_sw_rules);
3024 status = ice_aq_sw_rules(hw, s_rule,
3025 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3026 ice_aqc_opc_remove_sw_rules, NULL);
3028 /* Remove a book keeping from the list */
3029 ice_free(hw, s_rule);
3034 LIST_DEL(&list_elem->list_entry);
3035 ice_free(hw, list_elem);
3038 ice_release_lock(rule_lock);
3043 * ice_aq_get_res_alloc - get allocated resources
3044 * @hw: pointer to the HW struct
3045 * @num_entries: pointer to u16 to store the number of resource entries returned
3046 * @buf: pointer to user-supplied buffer
3047 * @buf_size: size of buf in bytes
3048 * @cd: pointer to command details structure or NULL
3050 * The user-supplied buffer must be large enough to store the resource
3051 * information for all resource types. Each resource type is an
3052 * ice_aqc_get_res_resp_data_elem structure.
/* Issue the get_res_alloc AQ command; firmware fills the caller's
 * buffer with one ice_aqc_get_res_resp_data_elem per resource type and
 * reports the element count, copied out through num_entries when the
 * caller asked for it.
 */
3055 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3056 u16 buf_size, struct ice_sq_cd *cd)
3058 struct ice_aqc_get_res_alloc *resp;
3059 enum ice_status status;
3060 struct ice_aq_desc desc;
3063 return ICE_ERR_BAD_PTR;
3065 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3066 return ICE_ERR_INVAL_SIZE;
3068 resp = &desc.params.get_res;
3070 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3071 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only report the count on success. */
3073 if (!status && num_entries)
3074 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3080 * ice_aq_get_res_descs - get allocated resource descriptors
3081 * @hw: pointer to the hardware structure
3082 * @num_entries: number of resource entries in buffer
3083 * @buf: Indirect buffer to hold data parameters and response
3084 * @buf_size: size of buffer for indirect commands
3085 * @res_type: resource type
3086 * @res_shared: is resource shared
3087 * @desc_id: input - first desc ID to start; output - next desc ID
3088 * @cd: pointer to command details structure or NULL
/* Issue the get_allocd_res_desc AQ command to fetch descriptors of an
 * allocated resource, starting at *desc_id; on return *desc_id is
 * advanced to the next descriptor for paging through the full set.
 */
3091 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3092 struct ice_aqc_get_allocd_res_desc_resp *buf,
3093 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3094 struct ice_sq_cd *cd)
3096 struct ice_aqc_get_allocd_res_desc *cmd;
3097 struct ice_aq_desc desc;
3098 enum ice_status status;
3100 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3102 cmd = &desc.params.get_res_desc;
3105 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
3107 if (buf_size != (num_entries * sizeof(*buf)))
3108 return ICE_ERR_PARAM;
3110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type plus the shared/dedicated flag in one field. */
3112 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3113 ICE_AQC_RES_TYPE_M) | (res_shared ?
3114 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3115 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3117 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3119 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3125 * ice_add_mac - Add a MAC address based filter rule
3126 * @hw: pointer to the hardware structure
3127 * @m_list: list of MAC addresses and forwarding information
3129 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3130 * multiple unicast addresses, the function assumes that all the
3131 * addresses are unique in a given add_mac call. It doesn't
3132 * check for duplicates in this case, removing duplicates from a given
3133 * list should be taken care of in the caller of this function.
/* Add a list of MAC filter rules.  Multicast (and shared-unicast)
 * entries go through ice_add_rule_internal one at a time; exclusive
 * unicast entries are validated first, then programmed in bulk with
 * chunked AQ calls, after which the FW-assigned rule IDs are copied back
 * and a book-keeping entry is created per rule.
 */
3136 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3138 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3139 struct ice_fltr_list_entry *m_list_itr;
3140 struct LIST_HEAD_TYPE *rule_head;
3141 u16 elem_sent, total_elem_left;
3142 struct ice_switch_info *sw;
3143 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3144 enum ice_status status = ICE_SUCCESS;
3145 u16 num_unicast = 0;
3149 return ICE_ERR_PARAM;
3151 sw = hw->switch_info;
3152 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* First pass: validate every entry and handle the non-bulk cases. */
3153 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3155 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3159 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3160 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3161 if (!ice_is_vsi_valid(hw, vsi_handle))
3162 return ICE_ERR_PARAM;
3163 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3164 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3165 /* update the src in case it is VSI num */
3166 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3167 return ICE_ERR_PARAM;
3168 m_list_itr->fltr_info.src = hw_vsi_id;
3169 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3170 IS_ZERO_ETHER_ADDR(add))
3171 return ICE_ERR_PARAM;
3172 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3173 /* Don't overwrite the unicast address */
3174 ice_acquire_lock(rule_lock);
3175 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3176 &m_list_itr->fltr_info)) {
3177 ice_release_lock(rule_lock);
3178 return ICE_ERR_ALREADY_EXISTS;
3180 ice_release_lock(rule_lock);
/* Multicast and shared-unicast addresses are added one-by-one via
 * the common path; failures are stored per entry and abort the call.
 */
3182 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3183 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3184 m_list_itr->status =
3185 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3187 if (m_list_itr->status)
3188 return m_list_itr->status;
3192 ice_acquire_lock(rule_lock);
3193 /* Exit if no suitable entries were found for adding bulk switch rule */
3195 status = ICE_SUCCESS;
3196 goto ice_add_mac_exit;
3199 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3201 /* Allocate switch rule buffer for the bulk update for unicast */
3202 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3203 s_rule = (struct ice_aqc_sw_rules_elem *)
3204 ice_calloc(hw, num_unicast, s_rule_size);
3206 status = ICE_ERR_NO_MEMORY;
3207 goto ice_add_mac_exit;
/* Second pass: serialize every exclusive-unicast rule into the bulk
 * buffer, one s_rule_size slot per rule.
 */
3211 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3213 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3214 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3216 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3217 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3218 ice_aqc_opc_add_sw_rules);
3219 r_iter = (struct ice_aqc_sw_rules_elem *)
3220 ((u8 *)r_iter + s_rule_size);
3224 /* Call AQ bulk switch rule update for all unicast addresses */
3226 /* Call AQ switch rule in AQ_MAX chunk */
3227 for (total_elem_left = num_unicast; total_elem_left > 0;
3228 total_elem_left -= elem_sent) {
3229 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ call at the max buffer the queue accepts. */
3231 elem_sent = min(total_elem_left,
3232 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3233 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3234 elem_sent, ice_aqc_opc_add_sw_rules,
3237 goto ice_add_mac_exit;
3238 r_iter = (struct ice_aqc_sw_rules_elem *)
3239 ((u8 *)r_iter + (elem_sent * s_rule_size));
3242 /* Fill up rule ID based on the value returned from FW */
3244 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3246 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3247 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3248 struct ice_fltr_mgmt_list_entry *fm_entry;
3250 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3251 f_info->fltr_rule_id =
3252 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3253 f_info->fltr_act = ICE_FWD_TO_VSI;
3254 /* Create an entry to track this MAC address */
3255 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3256 ice_malloc(hw, sizeof(*fm_entry));
3258 status = ICE_ERR_NO_MEMORY;
3259 goto ice_add_mac_exit;
3261 fm_entry->fltr_info = *f_info;
3262 fm_entry->vsi_count = 1;
3263 /* The book keeping entries will get removed when
3264 * base driver calls remove filter AQ command
3267 LIST_ADD(&fm_entry->list_entry, rule_head);
3268 r_iter = (struct ice_aqc_sw_rules_elem *)
3269 ((u8 *)r_iter + s_rule_size);
3274 ice_release_lock(rule_lock);
3276 ice_free(hw, s_rule);
3281 * ice_add_vlan_internal - Add one VLAN based filter rule
3282 * @hw: pointer to the hardware structure
3283 * @f_entry: filter entry containing one VLAN information
/* Add one VLAN filter rule.  VLAN rules always forward to a VSI list:
 * a new rule either reuses an existing single-VSI list (bumping its
 * ref_cnt) or creates a new list; an existing rule either grows its
 * private list in place or, if the list is shared, is re-pointed at a
 * freshly created two-VSI list.
 */
3285 static enum ice_status
3286 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3288 struct ice_switch_info *sw = hw->switch_info;
3289 struct ice_fltr_mgmt_list_entry *v_list_itr;
3290 struct ice_fltr_info *new_fltr, *cur_fltr;
3291 enum ice_sw_lkup_type lkup_type;
3292 u16 vsi_list_id = 0, vsi_handle;
3293 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3294 enum ice_status status = ICE_SUCCESS;
3296 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3297 return ICE_ERR_PARAM;
3299 f_entry->fltr_info.fwd_id.hw_vsi_id =
3300 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3301 new_fltr = &f_entry->fltr_info;
3303 /* VLAN ID should only be 12 bits */
3304 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3305 return ICE_ERR_PARAM;
3307 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3308 return ICE_ERR_PARAM;
3310 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3311 lkup_type = new_fltr->lkup_type;
3312 vsi_handle = new_fltr->vsi_handle;
3313 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3314 ice_acquire_lock(rule_lock);
3315 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3317 struct ice_vsi_list_map_info *map_info = NULL;
3319 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3320 /* All VLAN pruning rules use a VSI list. Check if
3321 * there is already a VSI list containing VSI that we
3322 * want to add. If found, use the same vsi_list_id for
3323 * this new VLAN rule or else create a new list.
3325 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3329 status = ice_create_vsi_list_rule(hw,
3337 /* Convert the action to forwarding to a VSI list. */
3338 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3339 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3342 status = ice_create_pkt_fwd_rule(hw, f_entry);
/* Re-look-up the rule just created to get its management entry. */
3344 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3347 status = ICE_ERR_DOES_NOT_EXIST;
3350 /* reuse VSI list for new rule and increment ref_cnt */
3352 v_list_itr->vsi_list_info = map_info;
3353 map_info->ref_cnt++;
3355 v_list_itr->vsi_list_info =
3356 ice_create_vsi_list_map(hw, &vsi_handle,
3360 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3361 /* Update existing VSI list to add new VSI ID only if it used
3364 cur_fltr = &v_list_itr->fltr_info;
3365 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3368 /* If VLAN rule exists and VSI list being used by this rule is
3369 * referenced by more than 1 VLAN rule. Then create a new VSI
3370 * list appending previous VSI with new VSI and update existing
3371 * VLAN rule to point to new VSI list ID
3373 struct ice_fltr_info tmp_fltr;
3374 u16 vsi_handle_arr[2];
3377 /* Current implementation only supports reusing VSI list with
3378 * one VSI count. We should never hit below condition
3380 if (v_list_itr->vsi_count > 1 &&
3381 v_list_itr->vsi_list_info->ref_cnt > 1) {
3382 ice_debug(hw, ICE_DBG_SW,
3383 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3384 status = ICE_ERR_CFG;
/* The shared list holds exactly one VSI; fetch its handle. */
3389 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3392 /* A rule already exists with the new VSI being added */
3393 if (cur_handle == vsi_handle) {
3394 status = ICE_ERR_ALREADY_EXISTS;
3398 vsi_handle_arr[0] = cur_handle;
3399 vsi_handle_arr[1] = vsi_handle;
3400 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3401 &vsi_list_id, lkup_type);
3405 tmp_fltr = v_list_itr->fltr_info;
3406 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3407 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3408 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3409 /* Update the previous switch rule to a new VSI list which
3410 * includes current VSI that is requested
3412 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3416 /* before overriding VSI list map info. decrement ref_cnt of
3419 v_list_itr->vsi_list_info->ref_cnt--;
3421 /* now update to newly created list */
3422 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3423 v_list_itr->vsi_list_info =
3424 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3426 v_list_itr->vsi_count++;
3430 ice_release_lock(rule_lock);
3435 * ice_add_vlan - Add VLAN based filter rule
3436 * @hw: pointer to the hardware structure
3437 * @v_list: list of VLAN entries and forwarding information
3440 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3442 struct ice_fltr_list_entry *v_list_itr;
3445 return ICE_ERR_PARAM;
3447 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3449 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3450 return ICE_ERR_PARAM;
3451 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3452 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3453 if (v_list_itr->status)
3454 return v_list_itr->status;
3460 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3461 * @hw: pointer to the hardware structure
3462 * @mv_list: list of MAC and VLAN filters
3464 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3465 * pruning bits enabled, then it is the responsibility of the caller to make
3466 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3467 * VLAN won't be received on that VSI otherwise.
3470 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3472 struct ice_fltr_list_entry *mv_list_itr;
3474 if (!mv_list || !hw)
3475 return ICE_ERR_PARAM;
3477 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3479 enum ice_sw_lkup_type l_type =
3480 mv_list_itr->fltr_info.lkup_type;
3482 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3483 return ICE_ERR_PARAM;
3484 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3485 mv_list_itr->status =
3486 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3488 if (mv_list_itr->status)
3489 return mv_list_itr->status;
3495 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3496 * @hw: pointer to the hardware structure
3497 * @em_list: list of ether type MAC filter, MAC is optional
3499 * This function requires the caller to populate the entries in
3500 * the filter list with the necessary fields (including flags to
3501 * indicate Tx or Rx rules).
3504 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3506 struct ice_fltr_list_entry *em_list_itr;
3508 if (!em_list || !hw)
3509 return ICE_ERR_PARAM;
3511 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3513 enum ice_sw_lkup_type l_type =
3514 em_list_itr->fltr_info.lkup_type;
3516 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3517 l_type != ICE_SW_LKUP_ETHERTYPE)
3518 return ICE_ERR_PARAM;
3520 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3522 if (em_list_itr->status)
3523 return em_list_itr->status;
3529 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3530 * @hw: pointer to the hardware structure
3531 * @em_list: list of ethertype or ethertype MAC entries
3534 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3536 struct ice_fltr_list_entry *em_list_itr, *tmp;
3538 if (!em_list || !hw)
3539 return ICE_ERR_PARAM;
3541 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3543 enum ice_sw_lkup_type l_type =
3544 em_list_itr->fltr_info.lkup_type;
3546 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3547 l_type != ICE_SW_LKUP_ETHERTYPE)
3548 return ICE_ERR_PARAM;
3550 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3552 if (em_list_itr->status)
3553 return em_list_itr->status;
3559 * ice_rem_sw_rule_info
3560 * @hw: pointer to the hardware structure
3561 * @rule_head: pointer to the switch list structure that we want to delete
3564 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3566 if (!LIST_EMPTY(rule_head)) {
3567 struct ice_fltr_mgmt_list_entry *entry;
3568 struct ice_fltr_mgmt_list_entry *tmp;
3570 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3571 ice_fltr_mgmt_list_entry, list_entry) {
3572 LIST_DEL(&entry->list_entry);
3573 ice_free(hw, entry);
3579 * ice_rem_adv_rule_info
3580 * @hw: pointer to the hardware structure
3581 * @rule_head: pointer to the switch list structure that we want to delete
3584 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3586 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3587 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3589 if (LIST_EMPTY(rule_head))
3592 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3593 ice_adv_fltr_mgmt_list_entry, list_entry) {
3594 LIST_DEL(&lst_itr->list_entry);
3595 ice_free(hw, lst_itr->lkups);
3596 ice_free(hw, lst_itr);
3601 * ice_rem_all_sw_rules_info
3602 * @hw: pointer to the hardware structure
3604 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3606 struct ice_switch_info *sw = hw->switch_info;
3609 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3610 struct LIST_HEAD_TYPE *rule_head;
3612 rule_head = &sw->recp_list[i].filt_rules;
3613 if (!sw->recp_list[i].adv_rule)
3614 ice_rem_sw_rule_info(hw, rule_head);
3616 ice_rem_adv_rule_info(hw, rule_head);
3621 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3622 * @pi: pointer to the port_info structure
3623 * @vsi_handle: VSI handle to set as default
3624 * @set: true to add the above mentioned switch rule, false to remove it
3625 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3627 * add filter rule to set/unset given VSI as default VSI for the switch
3628 * (represented by swid)
3631 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3634 struct ice_aqc_sw_rules_elem *s_rule;
3635 struct ice_fltr_info f_info;
3636 struct ice_hw *hw = pi->hw;
3637 enum ice_adminq_opc opcode;
3638 enum ice_status status;
3642 if (!ice_is_vsi_valid(hw, vsi_handle))
3643 return ICE_ERR_PARAM;
3644 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3646 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3647 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3648 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3650 return ICE_ERR_NO_MEMORY;
3652 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3654 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3655 f_info.flag = direction;
3656 f_info.fltr_act = ICE_FWD_TO_VSI;
3657 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3659 if (f_info.flag & ICE_FLTR_RX) {
3660 f_info.src = pi->lport;
3661 f_info.src_id = ICE_SRC_ID_LPORT;
3663 f_info.fltr_rule_id =
3664 pi->dflt_rx_vsi_rule_id;
3665 } else if (f_info.flag & ICE_FLTR_TX) {
3666 f_info.src_id = ICE_SRC_ID_VSI;
3667 f_info.src = hw_vsi_id;
3669 f_info.fltr_rule_id =
3670 pi->dflt_tx_vsi_rule_id;
3674 opcode = ice_aqc_opc_add_sw_rules;
3676 opcode = ice_aqc_opc_remove_sw_rules;
3678 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3680 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3681 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3684 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3686 if (f_info.flag & ICE_FLTR_TX) {
3687 pi->dflt_tx_vsi_num = hw_vsi_id;
3688 pi->dflt_tx_vsi_rule_id = index;
3689 } else if (f_info.flag & ICE_FLTR_RX) {
3690 pi->dflt_rx_vsi_num = hw_vsi_id;
3691 pi->dflt_rx_vsi_rule_id = index;
3694 if (f_info.flag & ICE_FLTR_TX) {
3695 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3696 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3697 } else if (f_info.flag & ICE_FLTR_RX) {
3698 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3699 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3704 ice_free(hw, s_rule);
3709 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3710 * @hw: pointer to the hardware structure
3711 * @recp_id: lookup type for which the specified rule needs to be searched
3712 * @f_info: rule information
3714 * Helper function to search for a unicast rule entry - this is to be used
3715 * to remove unicast MAC filter that is not shared with other VSIs on the
3718 * Returns pointer to entry storing the rule if found
3720 static struct ice_fltr_mgmt_list_entry *
3721 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3722 struct ice_fltr_info *f_info)
3724 struct ice_switch_info *sw = hw->switch_info;
3725 struct ice_fltr_mgmt_list_entry *list_itr;
3726 struct LIST_HEAD_TYPE *list_head;
3728 list_head = &sw->recp_list[recp_id].filt_rules;
3729 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3731 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3732 sizeof(f_info->l_data)) &&
3733 f_info->fwd_id.hw_vsi_id ==
3734 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3735 f_info->flag == list_itr->fltr_info.flag)
3742 * ice_remove_mac - remove a MAC address based filter rule
3743 * @hw: pointer to the hardware structure
3744 * @m_list: list of MAC addresses and forwarding information
3746 * This function removes either a MAC filter rule or a specific VSI from a
3747 * VSI list for a multicast MAC address.
3749 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3750 * ice_add_mac. Caller should be aware that this call will only work if all
3751 * the entries passed into m_list were added previously. It will not attempt to
3752 * do a partial remove of entries that were found.
3755 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3757 struct ice_fltr_list_entry *list_itr, *tmp;
3758 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3761 return ICE_ERR_PARAM;
3763 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3764 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3766 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3767 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3770 if (l_type != ICE_SW_LKUP_MAC)
3771 return ICE_ERR_PARAM;
3773 vsi_handle = list_itr->fltr_info.vsi_handle;
3774 if (!ice_is_vsi_valid(hw, vsi_handle))
3775 return ICE_ERR_PARAM;
3777 list_itr->fltr_info.fwd_id.hw_vsi_id =
3778 ice_get_hw_vsi_num(hw, vsi_handle);
3779 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3780 /* Don't remove the unicast address that belongs to
3781 * another VSI on the switch, since it is not being
3784 ice_acquire_lock(rule_lock);
3785 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3786 &list_itr->fltr_info)) {
3787 ice_release_lock(rule_lock);
3788 return ICE_ERR_DOES_NOT_EXIST;
3790 ice_release_lock(rule_lock);
3792 list_itr->status = ice_remove_rule_internal(hw,
3795 if (list_itr->status)
3796 return list_itr->status;
3802 * ice_remove_vlan - Remove VLAN based filter rule
3803 * @hw: pointer to the hardware structure
3804 * @v_list: list of VLAN entries and forwarding information
3807 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3809 struct ice_fltr_list_entry *v_list_itr, *tmp;
3812 return ICE_ERR_PARAM;
3814 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3816 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3818 if (l_type != ICE_SW_LKUP_VLAN)
3819 return ICE_ERR_PARAM;
3820 v_list_itr->status = ice_remove_rule_internal(hw,
3823 if (v_list_itr->status)
3824 return v_list_itr->status;
3830 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3831 * @hw: pointer to the hardware structure
3832 * @v_list: list of MAC VLAN entries and forwarding information
3835 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3837 struct ice_fltr_list_entry *v_list_itr, *tmp;
3840 return ICE_ERR_PARAM;
3842 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3844 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3846 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3847 return ICE_ERR_PARAM;
3848 v_list_itr->status =
3849 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3851 if (v_list_itr->status)
3852 return v_list_itr->status;
3858 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3859 * @fm_entry: filter entry to inspect
3860 * @vsi_handle: VSI handle to compare with filter info
3863 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3865 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3866 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3867 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3868 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3873 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3874 * @hw: pointer to the hardware structure
3875 * @vsi_handle: VSI handle to remove filters from
3876 * @vsi_list_head: pointer to the list to add entry to
3877 * @fi: pointer to fltr_info of filter entry to copy & add
3879 * Helper function, used when creating a list of filters to remove from
3880 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3881 * original filter entry, with the exception of fltr_info.fltr_act and
3882 * fltr_info.fwd_id fields. These are set such that later logic can
3883 * extract which VSI to remove the fltr from, and pass on that information.
3885 static enum ice_status
3886 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3887 struct LIST_HEAD_TYPE *vsi_list_head,
3888 struct ice_fltr_info *fi)
3890 struct ice_fltr_list_entry *tmp;
3892 /* this memory is freed up in the caller function
3893 * once filters for this VSI are removed
3895 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3897 return ICE_ERR_NO_MEMORY;
3899 tmp->fltr_info = *fi;
3901 /* Overwrite these fields to indicate which VSI to remove filter from,
3902 * so find and remove logic can extract the information from the
3903 * list entries. Note that original entries will still have proper
3906 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3907 tmp->fltr_info.vsi_handle = vsi_handle;
3908 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3910 LIST_ADD(&tmp->list_entry, vsi_list_head);
3916 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3917 * @hw: pointer to the hardware structure
3918 * @vsi_handle: VSI handle to remove filters from
3919 * @lkup_list_head: pointer to the list that has certain lookup type filters
3920 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3922 * Locates all filters in lkup_list_head that are used by the given VSI,
3923 * and adds COPIES of those entries to vsi_list_head (intended to be used
3924 * to remove the listed filters).
3925 * Note that this means all entries in vsi_list_head must be explicitly
3926 * deallocated by the caller when done with list.
3928 static enum ice_status
3929 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3930 struct LIST_HEAD_TYPE *lkup_list_head,
3931 struct LIST_HEAD_TYPE *vsi_list_head)
3933 struct ice_fltr_mgmt_list_entry *fm_entry;
3934 enum ice_status status = ICE_SUCCESS;
3936 /* check to make sure VSI ID is valid and within boundary */
3937 if (!ice_is_vsi_valid(hw, vsi_handle))
3938 return ICE_ERR_PARAM;
3940 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3941 ice_fltr_mgmt_list_entry, list_entry) {
3942 struct ice_fltr_info *fi;
3944 fi = &fm_entry->fltr_info;
3945 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3948 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3957 * ice_determine_promisc_mask
3958 * @fi: filter info to parse
3960 * Helper function to determine which ICE_PROMISC_ mask corresponds
3961 * to given filter into.
3963 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3965 u16 vid = fi->l_data.mac_vlan.vlan_id;
3966 u8 *macaddr = fi->l_data.mac.mac_addr;
3967 bool is_tx_fltr = false;
3968 u8 promisc_mask = 0;
3970 if (fi->flag == ICE_FLTR_TX)
3973 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3974 promisc_mask |= is_tx_fltr ?
3975 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3976 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3977 promisc_mask |= is_tx_fltr ?
3978 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3979 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3980 promisc_mask |= is_tx_fltr ?
3981 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3983 promisc_mask |= is_tx_fltr ?
3984 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3986 return promisc_mask;
3990 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3991 * @hw: pointer to the hardware structure
3992 * @vsi_handle: VSI handle to retrieve info from
3993 * @promisc_mask: pointer to mask to be filled in
3994 * @vid: VLAN ID of promisc VLAN VSI
3997 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4000 struct ice_switch_info *sw = hw->switch_info;
4001 struct ice_fltr_mgmt_list_entry *itr;
4002 struct LIST_HEAD_TYPE *rule_head;
4003 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4005 if (!ice_is_vsi_valid(hw, vsi_handle))
4006 return ICE_ERR_PARAM;
4010 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4011 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4013 ice_acquire_lock(rule_lock);
4014 LIST_FOR_EACH_ENTRY(itr, rule_head,
4015 ice_fltr_mgmt_list_entry, list_entry) {
4016 /* Continue if this filter doesn't apply to this VSI or the
4017 * VSI ID is not in the VSI map for this filter
4019 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4022 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4024 ice_release_lock(rule_lock);
4030 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4031 * @hw: pointer to the hardware structure
4032 * @vsi_handle: VSI handle to retrieve info from
4033 * @promisc_mask: pointer to mask to be filled in
4034 * @vid: VLAN ID of promisc VLAN VSI
4037 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4040 struct ice_switch_info *sw = hw->switch_info;
4041 struct ice_fltr_mgmt_list_entry *itr;
4042 struct LIST_HEAD_TYPE *rule_head;
4043 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4045 if (!ice_is_vsi_valid(hw, vsi_handle))
4046 return ICE_ERR_PARAM;
4050 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4051 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4053 ice_acquire_lock(rule_lock);
4054 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4056 /* Continue if this filter doesn't apply to this VSI or the
4057 * VSI ID is not in the VSI map for this filter
4059 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4062 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4064 ice_release_lock(rule_lock);
4070 * ice_remove_promisc - Remove promisc based filter rules
4071 * @hw: pointer to the hardware structure
4072 * @recp_id: recipe ID for which the rule needs to removed
4073 * @v_list: list of promisc entries
4075 static enum ice_status
4076 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4077 struct LIST_HEAD_TYPE *v_list)
4079 struct ice_fltr_list_entry *v_list_itr, *tmp;
4081 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4083 v_list_itr->status =
4084 ice_remove_rule_internal(hw, recp_id, v_list_itr);
4085 if (v_list_itr->status)
4086 return v_list_itr->status;
4092 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4093 * @hw: pointer to the hardware structure
4094 * @vsi_handle: VSI handle to clear mode
4095 * @promisc_mask: mask of promiscuous config bits to clear
4096 * @vid: VLAN ID to clear VLAN promiscuous
4099 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4102 struct ice_switch_info *sw = hw->switch_info;
4103 struct ice_fltr_list_entry *fm_entry, *tmp;
4104 struct LIST_HEAD_TYPE remove_list_head;
4105 struct ice_fltr_mgmt_list_entry *itr;
4106 struct LIST_HEAD_TYPE *rule_head;
4107 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4108 enum ice_status status = ICE_SUCCESS;
4111 if (!ice_is_vsi_valid(hw, vsi_handle))
4112 return ICE_ERR_PARAM;
4114 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4115 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4117 recipe_id = ICE_SW_LKUP_PROMISC;
4119 rule_head = &sw->recp_list[recipe_id].filt_rules;
4120 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4122 INIT_LIST_HEAD(&remove_list_head);
4124 ice_acquire_lock(rule_lock);
4125 LIST_FOR_EACH_ENTRY(itr, rule_head,
4126 ice_fltr_mgmt_list_entry, list_entry) {
4127 struct ice_fltr_info *fltr_info;
4128 u8 fltr_promisc_mask = 0;
4130 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4132 fltr_info = &itr->fltr_info;
4134 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4135 vid != fltr_info->l_data.mac_vlan.vlan_id)
4138 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4140 /* Skip if filter is not completely specified by given mask */
4141 if (fltr_promisc_mask & ~promisc_mask)
4144 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4148 ice_release_lock(rule_lock);
4149 goto free_fltr_list;
4152 ice_release_lock(rule_lock);
4154 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4157 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4158 ice_fltr_list_entry, list_entry) {
4159 LIST_DEL(&fm_entry->list_entry);
4160 ice_free(hw, fm_entry);
4167 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4168 * @hw: pointer to the hardware structure
4169 * @vsi_handle: VSI handle to configure
4170 * @promisc_mask: mask of promiscuous config bits
4171 * @vid: VLAN ID to set VLAN promiscuous
4174 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4176 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4177 struct ice_fltr_list_entry f_list_entry;
4178 struct ice_fltr_info new_fltr;
4179 enum ice_status status = ICE_SUCCESS;
4185 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4187 if (!ice_is_vsi_valid(hw, vsi_handle))
4188 return ICE_ERR_PARAM;
4189 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4191 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4193 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4194 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4195 new_fltr.l_data.mac_vlan.vlan_id = vid;
4196 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4198 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4199 recipe_id = ICE_SW_LKUP_PROMISC;
4202 /* Separate filters must be set for each direction/packet type
4203 * combination, so we will loop over the mask value, store the
4204 * individual type, and clear it out in the input mask as it
4207 while (promisc_mask) {
4213 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4214 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4215 pkt_type = UCAST_FLTR;
4216 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4217 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4218 pkt_type = UCAST_FLTR;
4220 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4221 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4222 pkt_type = MCAST_FLTR;
4223 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4224 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4225 pkt_type = MCAST_FLTR;
4227 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4228 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4229 pkt_type = BCAST_FLTR;
4230 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4231 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4232 pkt_type = BCAST_FLTR;
4236 /* Check for VLAN promiscuous flag */
4237 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4238 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4239 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4240 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4244 /* Set filter DA based on packet type */
4245 mac_addr = new_fltr.l_data.mac.mac_addr;
4246 if (pkt_type == BCAST_FLTR) {
4247 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4248 } else if (pkt_type == MCAST_FLTR ||
4249 pkt_type == UCAST_FLTR) {
4250 /* Use the dummy ether header DA */
4251 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4252 ICE_NONDMA_TO_NONDMA);
4253 if (pkt_type == MCAST_FLTR)
4254 mac_addr[0] |= 0x1; /* Set multicast bit */
4257 /* Need to reset this to zero for all iterations */
4260 new_fltr.flag |= ICE_FLTR_TX;
4261 new_fltr.src = hw_vsi_id;
4263 new_fltr.flag |= ICE_FLTR_RX;
4264 new_fltr.src = hw->port_info->lport;
4267 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4268 new_fltr.vsi_handle = vsi_handle;
4269 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4270 f_list_entry.fltr_info = new_fltr;
4272 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4273 if (status != ICE_SUCCESS)
4274 goto set_promisc_exit;
4282 * ice_set_vlan_vsi_promisc
4283 * @hw: pointer to the hardware structure
4284 * @vsi_handle: VSI handle to configure
4285 * @promisc_mask: mask of promiscuous config bits
4286 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4288 * Configure VSI with all associated VLANs to given promiscuous mode(s)
4291 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4292 bool rm_vlan_promisc)
4294 struct ice_switch_info *sw = hw->switch_info;
4295 struct ice_fltr_list_entry *list_itr, *tmp;
4296 struct LIST_HEAD_TYPE vsi_list_head;
4297 struct LIST_HEAD_TYPE *vlan_head;
4298 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4299 enum ice_status status;
4302 INIT_LIST_HEAD(&vsi_list_head);
4303 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4304 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4305 ice_acquire_lock(vlan_lock);
4306 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4308 ice_release_lock(vlan_lock);
4310 goto free_fltr_list;
4312 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4314 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4315 if (rm_vlan_promisc)
4316 status = ice_clear_vsi_promisc(hw, vsi_handle,
4317 promisc_mask, vlan_id);
4319 status = ice_set_vsi_promisc(hw, vsi_handle,
4320 promisc_mask, vlan_id);
4326 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4327 ice_fltr_list_entry, list_entry) {
4328 LIST_DEL(&list_itr->list_entry);
4329 ice_free(hw, list_itr);
4335 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4336 * @hw: pointer to the hardware structure
4337 * @vsi_handle: VSI handle to remove filters from
4338 * @lkup: switch rule filter lookup type
4341 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4342 enum ice_sw_lkup_type lkup)
4344 struct ice_switch_info *sw = hw->switch_info;
4345 struct ice_fltr_list_entry *fm_entry;
4346 struct LIST_HEAD_TYPE remove_list_head;
4347 struct LIST_HEAD_TYPE *rule_head;
4348 struct ice_fltr_list_entry *tmp;
4349 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4350 enum ice_status status;
4352 INIT_LIST_HEAD(&remove_list_head);
4353 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4354 rule_head = &sw->recp_list[lkup].filt_rules;
4355 ice_acquire_lock(rule_lock);
4356 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4358 ice_release_lock(rule_lock);
4363 case ICE_SW_LKUP_MAC:
4364 ice_remove_mac(hw, &remove_list_head);
4366 case ICE_SW_LKUP_VLAN:
4367 ice_remove_vlan(hw, &remove_list_head);
4369 case ICE_SW_LKUP_PROMISC:
4370 case ICE_SW_LKUP_PROMISC_VLAN:
4371 ice_remove_promisc(hw, lkup, &remove_list_head);
4373 case ICE_SW_LKUP_MAC_VLAN:
4374 ice_remove_mac_vlan(hw, &remove_list_head);
4376 case ICE_SW_LKUP_ETHERTYPE:
4377 case ICE_SW_LKUP_ETHERTYPE_MAC:
4378 ice_remove_eth_mac(hw, &remove_list_head);
4380 case ICE_SW_LKUP_DFLT:
4381 ice_debug(hw, ICE_DBG_SW,
4382 "Remove filters for this lookup type hasn't been implemented yet\n");
4384 case ICE_SW_LKUP_LAST:
4385 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
4389 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4390 ice_fltr_list_entry, list_entry) {
4391 LIST_DEL(&fm_entry->list_entry);
4392 ice_free(hw, fm_entry);
4397 * ice_remove_vsi_fltr - Remove all filters for a VSI
4398 * @hw: pointer to the hardware structure
4399 * @vsi_handle: VSI handle to remove filters from
4401 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4403 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4405 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4406 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4407 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4408 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4409 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4410 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4411 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4412 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4416 * ice_alloc_res_cntr - allocating resource counter
4417 * @hw: pointer to the hardware structure
4418 * @type: type of resource
4419 * @alloc_shared: if set it is shared else dedicated
4420 * @num_items: number of entries requested for FD resource type
4421 * @counter_id: counter index returned by AQ call
4424 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4427 struct ice_aqc_alloc_free_res_elem *buf;
4428 enum ice_status status;
4431 /* Allocate resource */
4432 buf_len = sizeof(*buf);
4433 buf = (struct ice_aqc_alloc_free_res_elem *)
4434 ice_malloc(hw, buf_len);
4436 return ICE_ERR_NO_MEMORY;
4438 buf->num_elems = CPU_TO_LE16(num_items);
4439 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4440 ICE_AQC_RES_TYPE_M) | alloc_shared);
4442 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4443 ice_aqc_opc_alloc_res, NULL);
4447 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4455 * ice_free_res_cntr - free resource counter
4456 * @hw: pointer to the hardware structure
4457 * @type: type of resource
4458 * @alloc_shared: if set it is shared else dedicated
4459 * @num_items: number of entries to be freed for FD resource type
4460 * @counter_id: counter ID resource which needs to be freed
4463 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4466 struct ice_aqc_alloc_free_res_elem *buf;
4467 enum ice_status status;
4471 buf_len = sizeof(*buf);
4472 buf = (struct ice_aqc_alloc_free_res_elem *)
4473 ice_malloc(hw, buf_len);
4475 return ICE_ERR_NO_MEMORY;
4477 buf->num_elems = CPU_TO_LE16(num_items);
4478 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4479 ICE_AQC_RES_TYPE_M) | alloc_shared);
4480 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4482 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4483 ice_aqc_opc_free_res, NULL);
4485 ice_debug(hw, ICE_DBG_SW,
4486 "counter resource could not be freed\n");
4493 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4494 * @hw: pointer to the hardware structure
4495 * @counter_id: returns counter index
4497 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4499 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4500 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4505 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4506 * @hw: pointer to the hardware structure
4507 * @counter_id: counter index to be freed
/* Thin wrapper: frees one dedicated VLAN counter via ice_free_res_cntr */
4509 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4511 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4512 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4517 * ice_alloc_res_lg_act - add large action resource
4518 * @hw: pointer to the hardware structure
4519 * @l_id: large action ID to fill it in
4520 * @num_acts: number of actions to hold with a large action entry
4522 static enum ice_status
4523 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4525 struct ice_aqc_alloc_free_res_elem *sw_buf;
4526 enum ice_status status;
/* Reject zero or more actions than a large action entry can hold */
4529 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4530 return ICE_ERR_PARAM;
4532 /* Allocate resource for large action */
4533 buf_len = sizeof(*sw_buf);
4534 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4535 ice_malloc(hw, buf_len);
4537 return ICE_ERR_NO_MEMORY;
4539 sw_buf->num_elems = CPU_TO_LE16(1);
4541 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4542 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4543 * If num_acts is greater than 2, then use
4544 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4545 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at
4546 * the beginning of the function.
 * NOTE(review): the original comment said WIDE_TABLE_3 for num_acts == 2,
 * but the code below selects WIDE_TABLE_2 — confirm the code is intended.
4549 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4550 else if (num_acts == 2)
4551 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4553 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
/* Ask FW for the wide-table entry; FW returns the allocated index */
4555 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4556 ice_aqc_opc_alloc_res, NULL);
4558 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4560 ice_free(hw, sw_buf);
4565 * ice_add_mac_with_sw_marker - add filter with sw marker
4566 * @hw: pointer to the hardware structure
4567 * @f_info: filter info structure containing the MAC filter information
4568 * @sw_marker: sw marker to tag the Rx descriptor with
4571 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4574 struct ice_switch_info *sw = hw->switch_info;
4575 struct ice_fltr_mgmt_list_entry *m_entry;
4576 struct ice_fltr_list_entry fl_info;
4577 struct LIST_HEAD_TYPE l_head;
4578 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4579 enum ice_status ret;
/* Only forward-to-VSI MAC filters with a valid marker/VSI are supported */
4583 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4584 return ICE_ERR_PARAM;
4586 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4587 return ICE_ERR_PARAM;
4589 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4590 return ICE_ERR_PARAM;
4592 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4593 return ICE_ERR_PARAM;
4594 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4596 /* Add filter if it doesn't exist so then the adding of large
4597 * action always results in update
4600 INIT_LIST_HEAD(&l_head);
4601 fl_info.fltr_info = *f_info;
4602 LIST_ADD(&fl_info.list_entry, &l_head);
/* Remember whether the filter pre-existed so cleanup can be symmetric */
4604 entry_exists = false;
4605 ret = ice_add_mac(hw, &l_head);
4606 if (ret == ICE_ERR_ALREADY_EXISTS)
4607 entry_exists = true;
4611 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4612 ice_acquire_lock(rule_lock);
4613 /* Get the book keeping entry for the filter */
4614 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4618 /* If counter action was enabled for this rule then don't enable
4619 * sw marker large action
4621 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4622 ret = ICE_ERR_PARAM;
4626 /* if same marker was added before */
4627 if (m_entry->sw_marker_id == sw_marker) {
4628 ret = ICE_ERR_ALREADY_EXISTS;
4632 /* Allocate a hardware table entry to hold large act. Three actions
4633 * for marker based large action
4635 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4639 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4642 /* Update the switch rule to add the marker action */
4643 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4645 ice_release_lock(rule_lock);
4650 ice_release_lock(rule_lock);
4651 /* only remove entry if it did not exist previously */
4653 ret = ice_remove_mac(hw, &l_head);
4659 * ice_add_mac_with_counter - add filter with counter enabled
4660 * @hw: pointer to the hardware structure
4661 * @f_info: pointer to filter info structure containing the MAC filter
4665 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4667 struct ice_switch_info *sw = hw->switch_info;
4668 struct ice_fltr_mgmt_list_entry *m_entry;
4669 struct ice_fltr_list_entry fl_info;
4670 struct LIST_HEAD_TYPE l_head;
4671 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4672 enum ice_status ret;
/* Only forward-to-VSI MAC filters on a valid VSI are supported */
4677 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4678 return ICE_ERR_PARAM;
4680 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4681 return ICE_ERR_PARAM;
4683 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4684 return ICE_ERR_PARAM;
4685 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4687 entry_exist = false;
4689 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4691 /* Add filter if it doesn't exist so then the adding of large
4692 * action always results in update
4694 INIT_LIST_HEAD(&l_head);
4696 fl_info.fltr_info = *f_info;
4697 LIST_ADD(&fl_info.list_entry, &l_head);
4699 ret = ice_add_mac(hw, &l_head);
4700 if (ret == ICE_ERR_ALREADY_EXISTS)
4705 ice_acquire_lock(rule_lock);
/* Locate the book-keeping entry for the filter just added/found */
4706 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4708 ret = ICE_ERR_BAD_PTR;
4712 /* Don't enable counter for a filter for which sw marker was enabled */
4713 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4714 ret = ICE_ERR_PARAM;
4718 /* If a counter was already enabled then don't need to add again */
4719 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4720 ret = ICE_ERR_ALREADY_EXISTS;
4724 /* Allocate a hardware table entry to VLAN counter */
4725 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4729 /* Allocate a hardware table entry to hold large act. Two actions for
4730 * counter based large action
4732 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4736 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4739 /* Update the switch rule to add the counter action */
4740 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4742 ice_release_lock(rule_lock);
4747 ice_release_lock(rule_lock);
4748 /* only remove entry if it did not exist previously */
4750 ret = ice_remove_mac(hw, &l_head);
4755 /* This is mapping table entry that maps every word within a given protocol
4756 * structure to the real byte offset as per the specification of that
4758 * for example dst address is 3 words in ethertype header and corresponding
4759 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4760 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4761 * matching entry describing its field. This needs to be updated if new
4762 * structure is added to that union.
 *
 * NOTE(review): entries must stay in the same order as the
 * ice_protocol_type enum — ice_fill_valid_words indexes this table
 * directly by rule->type; verify on any addition.
4764 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4765 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4766 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4767 { ICE_ETYPE_OL, { 0 } },
4768 { ICE_VLAN_OFOS, { 0, 2 } },
4769 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4770 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4771 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4772 26, 28, 30, 32, 34, 36, 38 } },
4773 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4774 26, 28, 30, 32, 34, 36, 38 } },
4775 { ICE_TCP_IL, { 0, 2 } },
4776 { ICE_UDP_OF, { 0, 2 } },
4777 { ICE_UDP_ILOS, { 0, 2 } },
4778 { ICE_SCTP_IL, { 0, 2 } },
4779 { ICE_VXLAN, { 8, 10, 12, 14 } },
4780 { ICE_GENEVE, { 8, 10, 12, 14 } },
4781 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4782 { ICE_NVGRE, { 0, 2, 4, 6 } },
4783 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4784 { ICE_PPPOE, { 0, 2, 4, 6 } },
4787 /* The following table describes preferred grouping of recipes.
4788 * If a recipe that needs to be programmed is a superset or matches one of the
4789 * following combinations, then the recipe needs to be chained as per the
 *
 * Maps each software protocol type to the hardware protocol ID used in
 * field vectors. Several tunnel types (VXLAN/GENEVE/VXLAN-GPE/GTP) share
 * the UDP-of hardware ID since they are all UDP-encapsulated.
4793 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4794 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4795 { ICE_MAC_IL, ICE_MAC_IL_HW },
4796 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4797 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4798 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4799 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4800 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4801 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4802 { ICE_TCP_IL, ICE_TCP_IL_HW },
4803 { ICE_UDP_OF, ICE_UDP_OF_HW },
4804 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4805 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4806 { ICE_VXLAN, ICE_UDP_OF_HW },
4807 { ICE_GENEVE, ICE_UDP_OF_HW },
4808 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4809 { ICE_NVGRE, ICE_GRE_OF_HW },
4810 { ICE_GTP, ICE_UDP_OF_HW },
4811 { ICE_PPPOE, ICE_PPPOE_HW },
4815 * ice_find_recp - find a recipe
4816 * @hw: pointer to the hardware structure
4817 * @lkup_exts: extension sequence to match
4819 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4821 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4823 bool refresh_required = true;
4824 struct ice_sw_recipe *recp;
4827 /* Walk through existing recipes to find a match */
4828 recp = hw->switch_info->recp_list;
4829 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4830 /* If recipe was not created for this ID, in SW bookkeeping,
4831 * check if FW has an entry for this recipe. If the FW has an
4832 * entry update it in our SW bookkeeping and continue with the
4835 if (!recp[i].recp_created)
4836 if (ice_get_recp_frm_fw(hw,
4837 hw->switch_info->recp_list, i,
4841 /* Skip inverse action recipes */
4842 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4843 ICE_AQ_RECIPE_ACT_INV_ACT)
4846 /* if number of words we are looking for match */
4847 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4848 struct ice_fv_word *a = lkup_exts->fv_words;
4849 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
/* Order-insensitive comparison: for each requested word (p),
 * scan the recipe's words (q) for a matching prot_id/offset pair.
 */
4853 for (p = 0; p < lkup_exts->n_val_words; p++) {
4854 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4856 if (a[p].off == b[q].off &&
4857 a[p].prot_id == b[q].prot_id)
4858 /* Found the "p"th word in the
4863 /* After walking through all the words in the
4864 * "i"th recipe if "p"th word was not found then
4865 * this recipe is not what we are looking for.
4866 * So break out from this loop and try the next
4869 if (q >= recp[i].lkup_exts.n_val_words) {
4874 /* If for "i"th recipe the found was never set to false
4875 * then it means we found our match
4878 return i; /* Return the recipe ID */
/* No recipe matched the requested lookup word set */
4881 return ICE_MAX_NUM_RECIPES;
4885 * ice_prot_type_to_id - get protocol ID from protocol type
4886 * @type: protocol type
4887 * @id: pointer to variable that will receive the ID
4889 * Returns true if found, false otherwise
/* Linear scan of ice_prot_id_tbl, terminated by the ICE_PROTOCOL_LAST entry */
4891 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4895 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4896 if (ice_prot_id_tbl[i].type == type) {
4897 *id = ice_prot_id_tbl[i].protocol_id;
4904 * ice_fill_valid_words - fill valid lookup words
4905 * @rule: advanced rule with lookup information
4906 * @lkup_exts: byte offset extractions of the words that are valid
4908 * calculate valid words in a lookup rule using mask value
 *
 * Appends one entry to lkup_exts for every 16-bit word of the rule's mask
 * that is non-zero; returns the number of words added.
4911 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4912 struct ice_prot_lkup_ext *lkup_exts)
/* Translate the rule's protocol type to a hardware protocol ID first */
4918 if (!ice_prot_type_to_id(rule->type, &prot_id))
4921 word = lkup_exts->n_val_words;
/* Walk the mask union word by word; a non-zero mask word is a match word.
 * NOTE(review): the prot_id written below comes from indexing
 * ice_prot_id_tbl by rule->type, which relies on that table's order
 * matching the ice_protocol_type enum.
 */
4923 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4924 if (((u16 *)&rule->m_u)[j] &&
4925 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4926 /* No more space to accommodate */
4927 if (word >= ICE_MAX_CHAIN_WORDS)
4929 lkup_exts->fv_words[word].off =
4930 ice_prot_ext[rule->type].offs[j];
4931 lkup_exts->fv_words[word].prot_id =
4932 ice_prot_id_tbl[rule->type].protocol_id;
4933 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
/* Return count of newly-added words and commit the new total */
4937 ret_val = word - lkup_exts->n_val_words;
4938 lkup_exts->n_val_words = word;
4944 * ice_create_first_fit_recp_def - Create a recipe grouping
4945 * @hw: pointer to the hardware structure
4946 * @lkup_exts: an array of protocol header extractions
4947 * @rg_list: pointer to a list that stores new recipe groups
4948 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4950 * Using first fit algorithm, take all the words that are still not done
4951 * and start grouping them in 4-word groups. Each group makes up one
4954 static enum ice_status
4955 ice_create_first_fit_recp_def(struct ice_hw *hw,
4956 struct ice_prot_lkup_ext *lkup_exts,
4957 struct LIST_HEAD_TYPE *rg_list,
4960 struct ice_pref_recipe_group *grp = NULL;
4965 /* Walk through every word in the rule to check if it is not done. If so
4966 * then this word needs to be part of a new recipe.
4968 for (j = 0; j < lkup_exts->n_val_words; j++)
4969 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* Start a new group when none exists yet or the current one is full */
4971 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4972 struct ice_recp_grp_entry *entry;
4974 entry = (struct ice_recp_grp_entry *)
4975 ice_malloc(hw, sizeof(*entry));
4977 return ICE_ERR_NO_MEMORY;
4978 LIST_ADD(&entry->l_entry, rg_list);
4979 grp = &entry->r_group;
/* Copy the pending word's protocol/offset pair and mask into the group */
4983 grp->pairs[grp->n_val_pairs].prot_id =
4984 lkup_exts->fv_words[j].prot_id;
4985 grp->pairs[grp->n_val_pairs].off =
4986 lkup_exts->fv_words[j].off;
4987 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4995 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4996 * @hw: pointer to the hardware structure
4997 * @fv_list: field vector with the extraction sequence information
4998 * @rg_list: recipe groupings with protocol-offset pairs
5000 * Helper function to fill in the field vector indices for protocol-offset
5001 * pairs. These indexes are then ultimately programmed into a recipe.
5003 static enum ice_status
5004 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
5005 struct LIST_HEAD_TYPE *rg_list)
5007 struct ice_sw_fv_list_entry *fv;
5008 struct ice_recp_grp_entry *rg;
5009 struct ice_fv_word *fv_ext;
5011 if (LIST_EMPTY(fv_list))
/* Use the first field vector in the list as the extraction reference */
5014 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5015 fv_ext = fv->fv_ptr->ew;
5017 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5020 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5021 struct ice_fv_word *pr;
5026 pr = &rg->r_group.pairs[i];
5027 mask = rg->r_group.mask[i];
/* Scan the extraction sequence for a matching prot_id/offset */
5029 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5030 if (fv_ext[j].prot_id == pr->prot_id &&
5031 fv_ext[j].off == pr->off) {
5034 /* Store index of field vector */
5036 /* Mask is given by caller as big
5037 * endian, but sent to FW as little
/* Byte-swap the 16-bit mask for FW consumption */
5040 rg->fv_mask[i] = mask << 8 | mask >> 8;
5044 /* Protocol/offset could not be found, caller gave an
5048 return ICE_ERR_PARAM;
5056 * ice_find_free_recp_res_idx - find free result indexes for recipe
5057 * @hw: pointer to hardware structure
5058 * @profiles: bitmap of profiles that will be associated with the new recipe
5059 * @free_idx: pointer to variable to receive the free index bitmap
5061 * The algorithm used here is:
5062 * 1. When creating a new recipe, create a set P which contains all
5063 * Profiles that will be associated with our new recipe
5065 * 2. For each Profile p in set P:
5066 * a. Add all recipes associated with Profile p into set R
5067 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5068 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5069 * i. Or just assume they all have the same possible indexes:
5071 * i.e., PossibleIndexes = 0x0000F00000000000
5073 * 3. For each Recipe r in set R:
5074 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5075 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5077 * FreeIndexes will contain the bits indicating the indexes free for use,
5078 * then the code needs to update the recipe[r].used_result_idx_bits to
5079 * indicate which indexes were selected for use by this recipe.
5082 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5083 ice_bitmap_t *free_idx)
5085 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5086 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5087 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
/* Start from clean bitmaps; possible_idx is then filled with all indexes */
5091 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5092 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5093 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5094 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
5096 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5097 ice_set_bit(count, possible_idx);
5099 /* For each profile we are going to associate the recipe with, add the
5100 * recipes that are associated with that profile. This will give us
5101 * the set of recipes that our recipe may collide with. Also, determine
5102 * what possible result indexes are usable given this set of profiles.
5105 while (ICE_MAX_NUM_PROFILES >
5106 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5107 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5108 ICE_MAX_NUM_RECIPES);
5109 ice_and_bitmap(possible_idx, possible_idx,
5110 hw->switch_info->prof_res_bm[bit],
5115 /* For each recipe that our new recipe may collide with, determine
5116 * which indexes have been used.
5118 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5119 if (ice_is_bit_set(recipes, bit)) {
5120 ice_or_bitmap(used_idx, used_idx,
5121 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used (used is a subset of possible) */
5125 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5127 /* return number of free indexes */
5130 while (ICE_MAX_FV_WORDS >
5131 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5140 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5141 * @hw: pointer to hardware structure
5142 * @rm: recipe management list entry
5143 * @match_tun: if field vector index for tunnel needs to be programmed
5144 * @profiles: bitmap of profiles that will be associated.
5146 static enum ice_status
5147 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5148 bool match_tun, ice_bitmap_t *profiles)
5150 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5151 struct ice_aqc_recipe_data_elem *tmp;
5152 struct ice_aqc_recipe_data_elem *buf;
5153 struct ice_recp_grp_entry *entry;
5154 enum ice_status status;
5160 /* When more than one recipe are required, another recipe is needed to
5161 * chain them together. Matching a tunnel metadata ID takes up one of
5162 * the match fields in the chaining recipe reducing the number of
5163 * chained recipes by one.
5165 /* check number of free result indices */
5166 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5167 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5169 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5170 free_res_idx, rm->n_grp_count);
5172 if (rm->n_grp_count > 1) {
5173 if (rm->n_grp_count > free_res_idx)
5174 return ICE_ERR_MAX_LIMIT;
/* Scratch buffer used to read existing recipes from FW */
5179 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5180 ICE_MAX_NUM_RECIPES,
5183 return ICE_ERR_NO_MEMORY;
5185 buf = (struct ice_aqc_recipe_data_elem *)
5186 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5188 status = ICE_ERR_NO_MEMORY;
5192 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5193 recipe_count = ICE_MAX_NUM_RECIPES;
5194 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5196 if (status || recipe_count == 0)
5199 /* Allocate the recipe resources, and configure them according to the
5200 * match fields from protocol headers and extracted field vectors.
5202 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5203 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5206 status = ice_alloc_recipe(hw, &entry->rid);
5210 /* Clear the result index of the located recipe, as this will be
5211 * updated, if needed, later in the recipe creation process.
5213 tmp[0].content.result_indx = 0;
5215 buf[recps] = tmp[0];
5216 buf[recps].recipe_indx = (u8)entry->rid;
5217 /* if the recipe is a non-root recipe RID should be programmed
5218 * as 0 for the rules to be applied correctly.
5220 buf[recps].content.rid = 0;
5221 ice_memset(&buf[recps].content.lkup_indx, 0,
5222 sizeof(buf[recps].content.lkup_indx),
5225 /* All recipes use look-up index 0 to match switch ID. */
5226 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5227 buf[recps].content.mask[0] =
5228 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5229 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5232 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5233 buf[recps].content.lkup_indx[i] = 0x80;
5234 buf[recps].content.mask[i] = 0;
/* Program this group's field-vector indexes/masks into words 1..n */
5237 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5238 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5239 buf[recps].content.mask[i + 1] =
5240 CPU_TO_LE16(entry->fv_mask[i]);
5243 if (rm->n_grp_count > 1) {
5244 /* Checks to see if there really is a valid result index
5247 if (chain_idx >= ICE_MAX_FV_WORDS) {
5248 ice_debug(hw, ICE_DBG_SW,
5249 "No chain index available\n");
5250 status = ICE_ERR_MAX_LIMIT;
/* Claim a result index for chaining and advance to the next free one */
5254 entry->chain_idx = chain_idx;
5255 buf[recps].content.result_indx =
5256 ICE_AQ_RECIPE_RESULT_EN |
5257 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5258 ICE_AQ_RECIPE_RESULT_DATA_M);
5259 ice_clear_bit(chain_idx, result_idx_bm);
5260 chain_idx = ice_find_first_bit(result_idx_bm,
5264 /* fill recipe dependencies */
5265 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5266 ICE_MAX_NUM_RECIPES);
5267 ice_set_bit(buf[recps].recipe_indx,
5268 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5269 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5273 if (rm->n_grp_count == 1) {
/* Single-group case: that one recipe is the root recipe */
5274 rm->root_rid = buf[0].recipe_indx;
5275 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5276 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5277 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5278 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5279 sizeof(buf[0].recipe_bitmap),
5280 ICE_NONDMA_TO_NONDMA);
5282 status = ICE_ERR_BAD_PTR;
5285 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5286 * the recipe which is getting created if specified
5287 * by user. Usually any advanced switch filter, which results
5288 * into new extraction sequence, ended up creating a new recipe
5289 * of type ROOT and usually recipes are associated with profiles
5290 * Switch rule referring newly created recipe, needs to have
5291 * either/or 'fwd' or 'join' priority, otherwise switch rule
5292 * evaluation will not happen correctly. In other words, if
5293 * switch rule to be evaluated on priority basis, then recipe
5294 * needs to have priority, otherwise it will be evaluated last.
5296 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5298 struct ice_recp_grp_entry *last_chain_entry;
5301 /* Allocate the last recipe that will chain the outcomes of the
5302 * other recipes together
5304 status = ice_alloc_recipe(hw, &rid);
5308 buf[recps].recipe_indx = (u8)rid;
5309 buf[recps].content.rid = (u8)rid;
5310 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5311 /* the new entry created should also be part of rg_list to
5312 * make sure we have complete recipe
5314 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5315 sizeof(*last_chain_entry));
5316 if (!last_chain_entry) {
5317 status = ICE_ERR_NO_MEMORY;
5320 last_chain_entry->rid = rid;
5321 ice_memset(&buf[recps].content.lkup_indx, 0,
5322 sizeof(buf[recps].content.lkup_indx),
5324 /* All recipes use look-up index 0 to match switch ID. */
5325 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5326 buf[recps].content.mask[0] =
5327 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5328 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5329 buf[recps].content.lkup_indx[i] =
5330 ICE_AQ_RECIPE_LKUP_IGNORE;
5331 buf[recps].content.mask[i] = 0;
5335 /* update r_bitmap with the recp that is used for chaining */
5336 ice_set_bit(rid, rm->r_bitmap);
5337 /* this is the recipe that chains all the other recipes so it
5338 * should not have a chaining ID to indicate the same
5340 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Point the chaining recipe's lookup words at each group's result index */
5341 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5343 last_chain_entry->fv_idx[i] = entry->chain_idx;
5344 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5345 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5346 ice_set_bit(entry->rid, rm->r_bitmap);
5348 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5349 if (sizeof(buf[recps].recipe_bitmap) >=
5350 sizeof(rm->r_bitmap)) {
5351 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5352 sizeof(buf[recps].recipe_bitmap),
5353 ICE_NONDMA_TO_NONDMA);
5355 status = ICE_ERR_BAD_PTR;
5358 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5360 /* To differentiate among different UDP tunnels, a meta data ID
5364 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5365 buf[recps].content.mask[i] =
5366 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5370 rm->root_rid = (u8)rid;
/* Program the assembled recipe(s) to FW under the change lock */
5372 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5376 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5377 ice_release_change_lock(hw);
5381 /* Every recipe that just got created add it to the recipe
5384 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5385 struct ice_switch_info *sw = hw->switch_info;
5386 bool is_root, idx_found = false;
5387 struct ice_sw_recipe *recp;
5388 u16 idx, buf_idx = 0;
5390 /* find buffer index for copying some data */
5391 for (idx = 0; idx < rm->n_grp_count; idx++)
5392 if (buf[idx].recipe_indx == entry->rid) {
5398 status = ICE_ERR_OUT_OF_RANGE;
/* Mirror the programmed recipe into SW bookkeeping */
5402 recp = &sw->recp_list[entry->rid];
5403 is_root = (rm->root_rid == entry->rid);
5404 recp->is_root = is_root;
5406 recp->root_rid = entry->rid;
5407 recp->big_recp = (is_root && rm->n_grp_count > 1);
5409 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5410 entry->r_group.n_val_pairs *
5411 sizeof(struct ice_fv_word),
5412 ICE_NONDMA_TO_NONDMA);
5414 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5415 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5417 /* Copy non-result fv index values and masks to recipe. This
5418 * call will also update the result recipe bitmask.
5420 ice_collect_result_idx(&buf[buf_idx], recp);
5422 /* for non-root recipes, also copy to the root, this allows
5423 * easier matching of a complete chained recipe
5426 ice_collect_result_idx(&buf[buf_idx],
5427 &sw->recp_list[rm->root_rid]);
5429 recp->n_ext_words = entry->r_group.n_val_pairs;
5430 recp->chain_idx = entry->chain_idx;
5431 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5432 recp->n_grp_count = rm->n_grp_count;
5433 recp->tun_type = rm->tun_type;
5434 recp->recp_created = true;
5449 * ice_create_recipe_group - creates recipe group
5450 * @hw: pointer to hardware structure
5451 * @rm: recipe management list entry
5452 * @lkup_exts: lookup elements
5454 static enum ice_status
5455 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5456 struct ice_prot_lkup_ext *lkup_exts)
5458 enum ice_status status;
5461 rm->n_grp_count = 0;
5463 /* Create recipes for words that are marked not done by packing them
5466 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5467 &rm->rg_list, &recp_count);
/* Record group count and copy the extraction words/masks into rm */
5469 rm->n_grp_count += recp_count;
5470 rm->n_ext_words = lkup_exts->n_val_words;
5471 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5472 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5473 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5474 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5481 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5482 * @hw: pointer to hardware structure
5483 * @lkups: lookup elements or match criteria for the advanced recipe, one
5484 * structure per protocol header
5485 * @lkups_cnt: number of protocols
5486 * @bm: bitmap of field vectors to consider
5487 * @fv_list: pointer to a list that holds the returned field vectors
5489 static enum ice_status
5490 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5491 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5493 enum ice_status status;
/* Temporary array of HW protocol IDs, one per lookup element */
5497 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5499 return ICE_ERR_NO_MEMORY;
5501 for (i = 0; i < lkups_cnt; i++)
5502 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5503 status = ICE_ERR_CFG;
5507 /* Find field vectors that include all specified protocol types */
5508 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
/* prot_ids is only needed for the query above; free it on all paths */
5511 ice_free(hw, prot_ids);
5516 * ice_add_special_words - Add words that are not protocols, such as metadata
5517 * @rinfo: other information regarding the rule e.g. priority and action info
5518 * @lkup_exts: lookup word structure
5520 static enum ice_status
5521 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5522 struct ice_prot_lkup_ext *lkup_exts)
5524 /* If this is a tunneled packet, then add recipe index to match the
5525 * tunnel bit in the packet metadata flags.
5527 if (rinfo->tun_type != ICE_NON_TUN) {
5528 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5529 u8 word = lkup_exts->n_val_words++;
/* Append a metadata word that extracts the tunnel flag MDID */
5531 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5532 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5534 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
/* No room left for an extra lookup word */
5536 return ICE_ERR_MAX_LIMIT;
5543 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5544 * @hw: pointer to hardware structure
5545 * @rinfo: other information regarding the rule e.g. priority and action info
5546 * @bm: pointer to memory for returning the bitmap of field vectors
5549 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5552 enum ice_prof_type type;
/* Map the rule's tunnel type to the profile class to search */
5554 switch (rinfo->tun_type) {
5556 type = ICE_PROF_NON_TUN;
5558 case ICE_ALL_TUNNELS:
5559 type = ICE_PROF_TUN_ALL;
5561 case ICE_SW_TUN_VXLAN_GPE:
5562 case ICE_SW_TUN_GENEVE:
5563 case ICE_SW_TUN_VXLAN:
5564 case ICE_SW_TUN_UDP:
5565 case ICE_SW_TUN_GTP:
5566 type = ICE_PROF_TUN_UDP;
5568 case ICE_SW_TUN_NVGRE:
5569 type = ICE_PROF_TUN_GRE;
5571 case ICE_SW_TUN_PPPOE:
5572 type = ICE_PROF_TUN_PPPOE;
5574 case ICE_SW_TUN_AND_NON_TUN:
5576 type = ICE_PROF_ALL;
/* Fill bm with all field vectors compatible with the chosen class */
5580 ice_get_sw_fv_bitmap(hw, type, bm);
5584 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5585 * @hw: pointer to hardware structure
5586 * @lkups: lookup elements or match criteria for the advanced recipe, one
5587 * structure per protocol header
5588 * @lkups_cnt: number of protocols
5589 * @rinfo: other information regarding the rule e.g. priority and action info
5590 * @rid: return the recipe ID of the recipe created
 * On success *rid is either an existing matching recipe (found via
 * ice_find_recp) or the root recipe ID of a newly added recipe.
5592 static enum ice_status
5593 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5594 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5596 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5597 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5598 struct ice_prot_lkup_ext *lkup_exts;
5599 struct ice_recp_grp_entry *r_entry;
5600 struct ice_sw_fv_list_entry *fvit;
5601 struct ice_recp_grp_entry *r_tmp;
5602 struct ice_sw_fv_list_entry *tmp;
5603 enum ice_status status = ICE_SUCCESS;
5604 struct ice_sw_recipe *rm;
5605 bool match_tun = false;
5609 return ICE_ERR_PARAM;
5611 lkup_exts = (struct ice_prot_lkup_ext *)
5612 ice_malloc(hw, sizeof(*lkup_exts))
5614 return ICE_ERR_NO_MEMORY;
5616 /* Determine the number of words to be matched and if it exceeds a
5617 * recipe's restrictions
5619 for (i = 0; i < lkups_cnt; i++) {
5622 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5623 status = ICE_ERR_CFG;
5624 goto err_free_lkup_exts;
5627 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5629 status = ICE_ERR_CFG;
5630 goto err_free_lkup_exts;
5634 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5636 status = ICE_ERR_NO_MEMORY;
5637 goto err_free_lkup_exts;
5640 /* Get field vectors that contain fields extracted from all the protocol
5641 * headers being programmed.
5643 INIT_LIST_HEAD(&rm->fv_list);
5644 INIT_LIST_HEAD(&rm->rg_list);
5646 /* Get bitmap of field vectors (profiles) that are compatible with the
5647 * rule request; only these will be searched in the subsequent call to
5650 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5652 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5656 /* Group match words into recipes using preferred recipe grouping
5659 status = ice_create_recipe_group(hw, rm, lkup_exts);
5663 /* There is only profile for UDP tunnels. So, it is necessary to use a
5664 * metadata ID flag to differentiate different tunnel types. A separate
5665 * recipe needs to be used for the metadata.
/* NOTE(review): multi-group UDP-tunnel recipes presumably set match_tun in
 * the elided branch below — confirm against the original source.
 */
5667 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5668 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5669 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5672 /* set the recipe priority if specified */
5673 rm->priority = rinfo->priority ? rinfo->priority : 0;
5675 /* Find offsets from the field vector. Pick the first one for all the
5678 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5682 /* get bitmap of all profiles the recipe will be associated with */
5683 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5684 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5686 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5687 ice_set_bit((u16)fvit->profile_id, profiles);
5690 /* Create any special protocol/offset pairs, such as looking at tunnel
5691 * bits by extracting metadata
5693 status = ice_add_special_words(rinfo, lkup_exts);
5695 goto err_free_lkup_exts;
5697 /* Look for a recipe which matches our requested fv / mask list */
5698 *rid = ice_find_recp(hw, lkup_exts);
5699 if (*rid < ICE_MAX_NUM_RECIPES)
5700 /* Success if found a recipe that match the existing criteria */
5703 /* Recipe we need does not exist, add a recipe */
5704 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5708 /* Associate all the recipes created with all the profiles in the
5709 * common field vector.
5711 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5713 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5716 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5717 (u8 *)r_bitmap, NULL);
/* Merge the recipes this rule created into the profile's existing set */
5721 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5722 ICE_MAX_NUM_RECIPES);
5723 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5727 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5730 ice_release_change_lock(hw);
5735 /* Update profile to recipe bitmap array */
5736 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5737 ICE_MAX_NUM_RECIPES);
5739 /* Update recipe to profile bitmap array */
5740 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5741 if (ice_is_bit_set(r_bitmap, j))
5742 ice_set_bit((u16)fvit->profile_id,
5743 recipe_to_profile[j]);
5746 *rid = rm->root_rid;
5747 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5748 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Common unwind path: free temporary recipe-group entries, then FV list */
5750 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5751 ice_recp_grp_entry, l_entry) {
5752 LIST_DEL(&r_entry->l_entry);
5753 ice_free(hw, r_entry);
5756 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5758 LIST_DEL(&fvit->list_entry);
5763 ice_free(hw, rm->root_buf);
/* lkup_exts is freed on both success and error paths */
5768 ice_free(hw, lkup_exts);
5774 * ice_find_dummy_packet - find dummy packet by tunnel type
5776 * @lkups: lookup elements or match criteria for the advanced recipe, one
5777 * structure per protocol header
5778 * @lkups_cnt: number of protocols
5779 * @tun_type: tunnel type from the match criteria
5780 * @pkt: dummy packet to fill according to filter match criteria
5781 * @pkt_len: packet length of dummy packet
5782 * @offsets: pointer to receive the pointer to the offsets for the packet
5785 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5786 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5788 const struct ice_dummy_pkt_offsets **offsets)
5790 bool tcp = false, udp = false, ipv6 = false, vlan = false;
5793 if (tun_type == ICE_SW_TUN_GTP) {
5794 *pkt = dummy_udp_gtp_packet;
5795 *pkt_len = sizeof(dummy_udp_gtp_packet);
5796 *offsets = dummy_udp_gtp_packet_offsets;
5799 if (tun_type == ICE_SW_TUN_PPPOE) {
5800 *pkt = dummy_pppoe_packet;
5801 *pkt_len = sizeof(dummy_pppoe_packet);
5802 *offsets = dummy_pppoe_packet_offsets;
5805 for (i = 0; i < lkups_cnt; i++) {
5806 if (lkups[i].type == ICE_UDP_ILOS)
5808 else if (lkups[i].type == ICE_TCP_IL)
5810 else if (lkups[i].type == ICE_IPV6_OFOS)
5812 else if (lkups[i].type == ICE_VLAN_OFOS)
5816 if (tun_type == ICE_ALL_TUNNELS) {
5817 *pkt = dummy_gre_udp_packet;
5818 *pkt_len = sizeof(dummy_gre_udp_packet);
5819 *offsets = dummy_gre_udp_packet_offsets;
5823 if (tun_type == ICE_SW_TUN_NVGRE) {
5825 *pkt = dummy_gre_tcp_packet;
5826 *pkt_len = sizeof(dummy_gre_tcp_packet);
5827 *offsets = dummy_gre_tcp_packet_offsets;
5831 *pkt = dummy_gre_udp_packet;
5832 *pkt_len = sizeof(dummy_gre_udp_packet);
5833 *offsets = dummy_gre_udp_packet_offsets;
5837 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5838 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5840 *pkt = dummy_udp_tun_tcp_packet;
5841 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5842 *offsets = dummy_udp_tun_tcp_packet_offsets;
5846 *pkt = dummy_udp_tun_udp_packet;
5847 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5848 *offsets = dummy_udp_tun_udp_packet_offsets;
5854 *pkt = dummy_vlan_udp_packet;
5855 *pkt_len = sizeof(dummy_vlan_udp_packet);
5856 *offsets = dummy_vlan_udp_packet_offsets;
5859 *pkt = dummy_udp_packet;
5860 *pkt_len = sizeof(dummy_udp_packet);
5861 *offsets = dummy_udp_packet_offsets;
5863 } else if (udp && ipv6) {
5865 *pkt = dummy_vlan_udp_ipv6_packet;
5866 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5867 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5870 *pkt = dummy_udp_ipv6_packet;
5871 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5872 *offsets = dummy_udp_ipv6_packet_offsets;
5874 } else if ((tcp && ipv6) || ipv6) {
5876 *pkt = dummy_vlan_tcp_ipv6_packet;
5877 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
5878 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
5881 *pkt = dummy_tcp_ipv6_packet;
5882 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5883 *offsets = dummy_tcp_ipv6_packet_offsets;
5888 *pkt = dummy_vlan_tcp_packet;
5889 *pkt_len = sizeof(dummy_vlan_tcp_packet);
5890 *offsets = dummy_vlan_tcp_packet_offsets;
5892 *pkt = dummy_tcp_packet;
5893 *pkt_len = sizeof(dummy_tcp_packet);
5894 *offsets = dummy_tcp_packet_offsets;
5899 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5901 * @lkups: lookup elements or match criteria for the advanced recipe, one
5902 * structure per protocol header
5903 * @lkups_cnt: number of protocols
5904 * @s_rule: stores rule information from the match criteria
5905 * @dummy_pkt: dummy packet to fill according to filter match criteria
5906 * @pkt_len: packet length of dummy packet
5907 * @offsets: offset info for the dummy packet
 * Returns ICE_ERR_PARAM when a lookup type has no entry in @offsets or is
 * not a recognized protocol; ICE_SUCCESS otherwise.
5909 static enum ice_status
5910 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5911 struct ice_aqc_sw_rules_elem *s_rule,
5912 const u8 *dummy_pkt, u16 pkt_len,
5913 const struct ice_dummy_pkt_offsets *offsets)
5918 /* Start with a packet with a pre-defined/dummy content. Then, fill
5919 * in the header values to be looked up or matched.
5921 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5923 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5925 for (i = 0; i < lkups_cnt; i++) {
5926 enum ice_protocol_type type;
5927 u16 offset = 0, len = 0, j;
5930 /* find the start of this layer; it should be found since this
5931 * was already checked when search for the dummy packet
/* NOTE(review): @offsets must be terminated with ICE_PROTOCOL_LAST —
 * the loop below relies on that sentinel.
 */
5933 type = lkups[i].type;
5934 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5935 if (type == offsets[j].type) {
5936 offset = offsets[j].offset;
5941 /* this should never happen in a correct calling sequence */
5943 return ICE_ERR_PARAM;
/* Map the protocol type to the number of header bytes to patch */
5945 switch (lkups[i].type) {
5948 len = sizeof(struct ice_ether_hdr);
5951 len = sizeof(struct ice_ethtype_hdr);
5954 len = sizeof(struct ice_vlan_hdr);
5958 len = sizeof(struct ice_ipv4_hdr);
5962 len = sizeof(struct ice_ipv6_hdr);
5967 len = sizeof(struct ice_l4_hdr);
5970 len = sizeof(struct ice_sctp_hdr);
5973 len = sizeof(struct ice_nvgre);
5978 len = sizeof(struct ice_udp_tnl_hdr);
5982 len = sizeof(struct ice_udp_gtp_hdr);
5985 len = sizeof(struct ice_pppoe_hdr);
5988 return ICE_ERR_PARAM;
5991 /* the length should be a word multiple */
5992 if (len % ICE_BYTES_PER_WORD)
5995 /* We have the offset to the header start, the length, the
5996 * caller's header values and mask. Use this information to
5997 * copy the data into the dummy packet appropriately based on
5998 * the mask. Note that we need to only write the bits as
5999 * indicated by the mask to make sure we don't improperly write
6000 * over any significant packet data.
6002 for (j = 0; j < len / sizeof(u16); j++)
6003 if (((u16 *)&lkups[i].m_u)[j])
6004 ((u16 *)(pkt + offset))[j] =
6005 (((u16 *)(pkt + offset))[j] &
6006 ~((u16 *)&lkups[i].m_u)[j]) |
6007 (((u16 *)&lkups[i].h_u)[j] &
6008 ((u16 *)&lkups[i].m_u)[j]);
/* Record the dummy packet length in the rule element */
6011 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6017 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6018 * @hw: pointer to the hardware structure
6019 * @tun_type: tunnel type
6020 * @pkt: dummy packet to fill in
6021 * @offsets: offset info for the dummy packet
6023 static enum ice_status
6024 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6025 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6030 case ICE_SW_TUN_AND_NON_TUN:
6031 case ICE_SW_TUN_VXLAN_GPE:
6032 case ICE_SW_TUN_VXLAN:
6033 case ICE_SW_TUN_UDP:
6034 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6038 case ICE_SW_TUN_GENEVE:
6039 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6044 /* Nothing needs to be done for this tunnel type */
6048 /* Find the outer UDP protocol header and insert the port number */
6049 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6050 if (offsets[i].type == ICE_UDP_OF) {
6051 struct ice_l4_hdr *hdr;
6054 offset = offsets[i].offset;
6055 hdr = (struct ice_l4_hdr *)&pkt[offset];
6056 hdr->dst_port = CPU_TO_BE16(open_port);
6066 * ice_find_adv_rule_entry - Search a rule entry
6067 * @hw: pointer to the hardware structure
6068 * @lkups: lookup elements or match criteria for the advanced recipe, one
6069 * structure per protocol header
6070 * @lkups_cnt: number of protocols
6071 * @recp_id: recipe ID for which we are finding the rule
6072 * @rinfo: other information regarding the rule e.g. priority and action info
6074 * Helper function to search for a given advance rule entry
6075 * Returns pointer to entry storing the rule if found
6077 static struct ice_adv_fltr_mgmt_list_entry *
6078 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6079 u16 lkups_cnt, u8 recp_id,
6080 struct ice_adv_rule_info *rinfo)
6082 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6083 struct ice_switch_info *sw = hw->switch_info;
6086 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6087 ice_adv_fltr_mgmt_list_entry, list_entry) {
6088 bool lkups_matched = true;
6090 if (lkups_cnt != list_itr->lkups_cnt)
6092 for (i = 0; i < list_itr->lkups_cnt; i++)
6093 if (memcmp(&list_itr->lkups[i], &lkups[i],
6095 lkups_matched = false;
6098 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6099 rinfo->tun_type == list_itr->rule_info.tun_type &&
6107 * ice_adv_add_update_vsi_list
6108 * @hw: pointer to the hardware structure
6109 * @m_entry: pointer to current adv filter management list entry
6110 * @cur_fltr: filter information from the book keeping entry
6111 * @new_fltr: filter information with the new VSI to be added
6113 * Call AQ command to add or update previously created VSI list with new VSI.
6115 * Helper function to do book keeping associated with adding filter information
6116 * The algorithm to do the booking keeping is described below :
6117 * When a VSI needs to subscribe to a given advanced filter
6118 * if only one VSI has been added till now
6119 * Allocate a new VSI list and add two VSIs
6120 * to this list using switch rule command
6121 * Update the previously created switch rule with the
6122 * newly created VSI list ID
6123 * if a VSI list was previously created
6124 * Add the new VSI to the previously created VSI list set
6125 * using the update switch rule command
6127 static enum ice_status
6128 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6129 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6130 struct ice_adv_rule_info *cur_fltr,
6131 struct ice_adv_rule_info *new_fltr)
6133 enum ice_status status;
6134 u16 vsi_list_id = 0;
6136 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6137 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6138 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6139 return ICE_ERR_NOT_IMPL;
6141 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6142 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6143 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6144 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6145 return ICE_ERR_NOT_IMPL;
6147 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6148 /* Only one entry existed in the mapping and it was not already
6149 * a part of a VSI list. So, create a VSI list with the old and
6152 struct ice_fltr_info tmp_fltr;
6153 u16 vsi_handle_arr[2];
6155 /* A rule already exists with the new VSI being added */
6156 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6157 new_fltr->sw_act.fwd_id.hw_vsi_id)
6158 return ICE_ERR_ALREADY_EXISTS;
6160 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6161 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6162 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6168 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6169 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6170 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6171 /* Update the previous switch rule of "forward to VSI" to
6174 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6178 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6179 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6180 m_entry->vsi_list_info =
6181 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6184 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6186 if (!m_entry->vsi_list_info)
6189 /* A rule already exists with the new VSI being added */
6190 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6193 /* Update the previously created VSI list set with
6194 * the new VSI ID passed in
6196 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6198 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6200 ice_aqc_opc_update_sw_rules,
6202 /* update VSI list mapping info with new VSI ID */
6204 ice_set_bit(vsi_handle,
6205 m_entry->vsi_list_info->vsi_map);
6208 m_entry->vsi_count++;
6213 * ice_add_adv_rule - helper function to create an advanced switch rule
6214 * @hw: pointer to the hardware structure
6215 * @lkups: information on the words that needs to be looked up. All words
6216 * together makes one recipe
6217 * @lkups_cnt: num of entries in the lkups array
6218 * @rinfo: other information related to the rule that needs to be programmed
6219 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6220 * ignored is case of error.
6222 * This function can program only 1 rule at a time. The lkups is used to
6223 * describe the all the words that forms the "lookup" portion of the recipe.
6224 * These words can span multiple protocols. Callers to this function need to
6225 * pass in a list of protocol headers with lookup information along and mask
6226 * that determines which words are valid from the given protocol header.
6227 * rinfo describes other information related to this rule such as forwarding
6228 * IDs, priority of this rule, etc.
6231 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6232 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6233 struct ice_rule_query_data *added_entry)
6235 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6236 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6237 const struct ice_dummy_pkt_offsets *pkt_offsets;
6238 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6239 struct LIST_HEAD_TYPE *rule_head;
6240 struct ice_switch_info *sw;
6241 enum ice_status status;
6242 const u8 *pkt = NULL;
6247 /* Initialize profile to result index bitmap */
6248 if (!hw->switch_info->prof_res_bm_init) {
6249 hw->switch_info->prof_res_bm_init = 1;
6250 ice_init_prof_result_bm(hw);
6254 return ICE_ERR_PARAM;
6256 /* get # of words we need to match */
6258 for (i = 0; i < lkups_cnt; i++) {
6261 ptr = (u16 *)&lkups[i].m_u;
6262 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6266 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6267 return ICE_ERR_PARAM;
6269 /* make sure that we can locate a dummy packet */
6270 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6273 status = ICE_ERR_PARAM;
6274 goto err_ice_add_adv_rule;
6277 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6278 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6279 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6280 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6283 vsi_handle = rinfo->sw_act.vsi_handle;
6284 if (!ice_is_vsi_valid(hw, vsi_handle))
6285 return ICE_ERR_PARAM;
6287 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6288 rinfo->sw_act.fwd_id.hw_vsi_id =
6289 ice_get_hw_vsi_num(hw, vsi_handle);
6290 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6291 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6293 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6296 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6298 /* we have to add VSI to VSI_LIST and increment vsi_count.
6299 * Also Update VSI list so that we can change forwarding rule
6300 * if the rule already exists, we will check if it exists with
6301 * same vsi_id, if not then add it to the VSI list if it already
6302 * exists if not then create a VSI list and add the existing VSI
6303 * ID and the new VSI ID to the list
6304 * We will add that VSI to the list
6306 status = ice_adv_add_update_vsi_list(hw, m_entry,
6307 &m_entry->rule_info,
6310 added_entry->rid = rid;
6311 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6312 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Rule buffer = fixed lookup-rule header plus the dummy packet bytes */
6316 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6317 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6319 return ICE_ERR_NO_MEMORY;
6320 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
/* Build the single-action word for this rule's forwarding behavior */
6321 switch (rinfo->sw_act.fltr_act) {
6322 case ICE_FWD_TO_VSI:
6323 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6324 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6325 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6328 act |= ICE_SINGLE_ACT_TO_Q;
6329 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6330 ICE_SINGLE_ACT_Q_INDEX_M;
6332 case ICE_FWD_TO_QGRP:
6333 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6334 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6335 act |= ICE_SINGLE_ACT_TO_Q;
6336 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6337 ICE_SINGLE_ACT_Q_INDEX_M;
6338 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6339 ICE_SINGLE_ACT_Q_REGION_M;
6341 case ICE_DROP_PACKET:
6342 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6343 ICE_SINGLE_ACT_VALID_BIT;
6346 status = ICE_ERR_CFG;
6347 goto err_ice_add_adv_rule;
6350 /* set the rule LOOKUP type based on caller specified 'RX'
6351 * instead of hardcoding it to be either LOOKUP_TX/RX
6353 * for 'RX' set the source to be the port number
6354 * for 'TX' set the source to be the source HW VSI number (determined
6358 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6359 s_rule->pdata.lkup_tx_rx.src =
6360 CPU_TO_LE16(hw->port_info->lport);
6362 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6363 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6366 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6367 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6369 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6370 pkt_len, pkt_offsets);
6372 goto err_ice_add_adv_rule;
6374 if (rinfo->tun_type != ICE_NON_TUN &&
6375 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6376 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6377 s_rule->pdata.lkup_tx_rx.hdr,
6380 goto err_ice_add_adv_rule;
6383 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6384 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6387 goto err_ice_add_adv_rule;
6388 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6389 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6391 status = ICE_ERR_NO_MEMORY;
6392 goto err_ice_add_adv_rule;
6395 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6396 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6397 ICE_NONDMA_TO_NONDMA);
6398 if (!adv_fltr->lkups) {
6399 status = ICE_ERR_NO_MEMORY;
6400 goto err_ice_add_adv_rule;
6403 adv_fltr->lkups_cnt = lkups_cnt;
6404 adv_fltr->rule_info = *rinfo;
6405 adv_fltr->rule_info.fltr_rule_id =
6406 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6407 sw = hw->switch_info;
6408 sw->recp_list[rid].adv_rule = true;
6409 rule_head = &sw->recp_list[rid].filt_rules;
6411 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6412 struct ice_fltr_info tmp_fltr;
/* NOTE(review): tmp_fltr is only partially initialized here — verify the
 * remaining ice_fltr_info fields are don't-care for ice_update_pkt_fwd_rule,
 * or zero the struct first (CERT EXP33-C).
 */
6414 tmp_fltr.fltr_rule_id =
6415 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6416 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6417 tmp_fltr.fwd_id.hw_vsi_id =
6418 ice_get_hw_vsi_num(hw, vsi_handle);
6419 tmp_fltr.vsi_handle = vsi_handle;
6420 /* Update the previous switch rule of "forward to VSI" to
6423 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6425 goto err_ice_add_adv_rule;
6426 adv_fltr->vsi_count = 1;
6429 /* Add rule entry to book keeping list */
6430 LIST_ADD(&adv_fltr->list_entry, rule_head);
6432 added_entry->rid = rid;
6433 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6434 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error unwind: free the partially-built bookkeeping entry; s_rule is
 * released on all paths.
 */
6436 err_ice_add_adv_rule:
6437 if (status && adv_fltr) {
6438 ice_free(hw, adv_fltr->lkups);
6439 ice_free(hw, adv_fltr);
6442 ice_free(hw, s_rule);
6448 * ice_adv_rem_update_vsi_list
6449 * @hw: pointer to the hardware structure
6450 * @vsi_handle: VSI handle of the VSI to remove
6451 * @fm_list: filter management entry for which the VSI list management needs to
6454 static enum ice_status
6455 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6456 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6458 struct ice_vsi_list_map_info *vsi_list_info;
6459 enum ice_sw_lkup_type lkup_type;
6460 enum ice_status status;
6463 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6464 fm_list->vsi_count == 0)
6465 return ICE_ERR_PARAM;
6467 /* A rule with the VSI being removed does not exist */
6468 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6469 return ICE_ERR_DOES_NOT_EXIST;
6471 lkup_type = ICE_SW_LKUP_LAST;
6472 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6473 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6474 ice_aqc_opc_update_sw_rules,
6479 fm_list->vsi_count--;
6480 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6481 vsi_list_info = fm_list->vsi_list_info;
6482 if (fm_list->vsi_count == 1) {
6483 struct ice_fltr_info tmp_fltr;
6486 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6488 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6489 return ICE_ERR_OUT_OF_RANGE;
6491 /* Make sure VSI list is empty before removing it below */
6492 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6494 ice_aqc_opc_update_sw_rules,
6498 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6499 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6500 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6501 tmp_fltr.fwd_id.hw_vsi_id =
6502 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6503 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6504 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6506 /* Update the previous switch rule of "MAC forward to VSI" to
6507 * "MAC fwd to VSI list"
6509 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6511 ice_debug(hw, ICE_DBG_SW,
6512 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6513 tmp_fltr.fwd_id.hw_vsi_id, status);
6517 /* Remove the VSI list since it is no longer used */
6518 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6520 ice_debug(hw, ICE_DBG_SW,
6521 "Failed to remove VSI list %d, error %d\n",
6522 vsi_list_id, status);
6526 LIST_DEL(&vsi_list_info->list_entry);
6527 ice_free(hw, vsi_list_info);
6528 fm_list->vsi_list_info = NULL;
6535 * ice_rem_adv_rule - removes existing advanced switch rule
6536 * @hw: pointer to the hardware structure
6537 * @lkups: information on the words that needs to be looked up. All words
6538 * together makes one recipe
6539 * @lkups_cnt: num of entries in the lkups array
6540 * @rinfo: Its the pointer to the rule information for the rule
6542 * This function can be used to remove 1 rule at a time. The lkups is
6543 * used to describe all the words that forms the "lookup" portion of the
6544 * rule. These words can span multiple protocols. Callers to this function
6545 * need to pass in a list of protocol headers with lookup information along
6546 * and mask that determines which words are valid from the given protocol
6547 * header. rinfo describes other information related to this rule such as
6548 * forwarding IDs, priority of this rule, etc.
6551 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6552 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6554 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6555 struct ice_prot_lkup_ext lkup_exts;
6556 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6557 enum ice_status status = ICE_SUCCESS;
6558 bool remove_rule = false;
6559 u16 i, rid, vsi_handle;
6561 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6562 for (i = 0; i < lkups_cnt; i++) {
6565 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6568 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6573 /* Create any special protocol/offset pairs, such as looking at tunnel
6574 * bits by extracting metadata
6576 status = ice_add_special_words(rinfo, &lkup_exts);
6580 rid = ice_find_recp(hw, &lkup_exts);
6581 /* If did not find a recipe that match the existing criteria */
6582 if (rid == ICE_MAX_NUM_RECIPES)
6583 return ICE_ERR_PARAM;
6585 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6586 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6587 /* the rule is already removed */
/* NOTE(review): rule_lock is held while the bookkeeping entry is updated
 * below; both the >1-VSI and the fallback branch call
 * ice_adv_rem_update_vsi_list — confirm the elided branch conditions.
 */
6590 ice_acquire_lock(rule_lock);
6591 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6593 } else if (list_elem->vsi_count > 1) {
6594 list_elem->vsi_list_info->ref_cnt--;
6595 remove_rule = false;
6596 vsi_handle = rinfo->sw_act.vsi_handle;
6597 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6599 vsi_handle = rinfo->sw_act.vsi_handle;
6600 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6602 ice_release_lock(rule_lock);
6605 if (list_elem->vsi_count == 0)
6608 ice_release_lock(rule_lock);
/* remove_rule path: delete the switch rule from hardware via a remove
 * element (act = 0, hdr_len = 0), then drop the bookkeeping entry.
 */
6610 struct ice_aqc_sw_rules_elem *s_rule;
6613 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6615 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6618 return ICE_ERR_NO_MEMORY;
6619 s_rule->pdata.lkup_tx_rx.act = 0;
6620 s_rule->pdata.lkup_tx_rx.index =
6621 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6622 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6623 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6625 ice_aqc_opc_remove_sw_rules, NULL);
6626 if (status == ICE_SUCCESS) {
6627 ice_acquire_lock(rule_lock);
6628 LIST_DEL(&list_elem->list_entry);
6629 ice_free(hw, list_elem->lkups);
6630 ice_free(hw, list_elem);
6631 ice_release_lock(rule_lock);
6633 ice_free(hw, s_rule);
6639 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6640 * @hw: pointer to the hardware structure
6641 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6643 * This function is used to remove 1 rule at a time. The removal is based on
6644 * the remove_entry parameter. This function will remove rule for a given
6645 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6648 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6649 struct ice_rule_query_data *remove_entry)
6651 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6652 struct LIST_HEAD_TYPE *list_head;
6653 struct ice_adv_rule_info rinfo;
6654 struct ice_switch_info *sw;
6656 sw = hw->switch_info;
6657 if (!sw->recp_list[remove_entry->rid].recp_created)
6658 return ICE_ERR_PARAM;
6659 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6660 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6662 if (list_itr->rule_info.fltr_rule_id ==
6663 remove_entry->rule_id) {
6664 rinfo = list_itr->rule_info;
6665 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6666 return ice_rem_adv_rule(hw, list_itr->lkups,
6667 list_itr->lkups_cnt, &rinfo);
6670 return ICE_ERR_PARAM;
6674 * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
6676 * @hw: pointer to the hardware structure
6677 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6679 * This function is used to remove all the rules for a given VSI and as soon
6680 * as removing a rule fails, it will return immediately with the error code,
6681 * else it will return ICE_SUCCESS
6684 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6686 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6687 struct ice_vsi_list_map_info *map_info;
6688 struct LIST_HEAD_TYPE *list_head;
6689 struct ice_adv_rule_info rinfo;
6690 struct ice_switch_info *sw;
6691 enum ice_status status;
6692 u16 vsi_list_id = 0;
6695 sw = hw->switch_info;
6696 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6697 if (!sw->recp_list[rid].recp_created)
6699 if (!sw->recp_list[rid].adv_rule)
6701 list_head = &sw->recp_list[rid].filt_rules;
6703 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6704 ice_adv_fltr_mgmt_list_entry, list_entry) {
6705 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6709 rinfo = list_itr->rule_info;
6710 rinfo.sw_act.vsi_handle = vsi_handle;
6711 status = ice_rem_adv_rule(hw, list_itr->lkups,
6712 list_itr->lkups_cnt, &rinfo);
6722 * ice_replay_fltr - Replay all the filters stored by a specific list head
6723 * @hw: pointer to the hardware structure
6724 * @list_head: list for which filters needs to be replayed
6725 * @recp_id: Recipe ID for which rules need to be replayed
6727 static enum ice_status
6728 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
/* Replays every filter on @list_head for recipe @recp_id after a reset.
 * NOTE(review): this excerpt is elided -- braces, early returns, the loop
 * variable declarations and parts of the per-VSI loop are not visible here;
 * comments below describe only what the visible lines establish.
 */
6730 struct ice_fltr_mgmt_list_entry *itr;
6731 struct LIST_HEAD_TYPE l_head;
6732 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list (early return presumably follows --
 * the statement itself is elided from this excerpt).
 */
6734 if (LIST_EMPTY(list_head))
6737 /* Move entries from the given list_head to a temporary l_head so that
6738 * they can be replayed. Otherwise when trying to re-add the same
6739 * filter, the function will return already exists
6741 LIST_REPLACE_INIT(list_head, &l_head)
6743 /* Mark the given list_head empty by reinitializing it so filters
6744 * could be added again by *handler
6746 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6748 struct ice_fltr_list_entry f_entry;
/* Work on a copy: the add path below rewrites fltr_info fields. */
6750 f_entry.fltr_info = itr->fltr_info;
/* Rules owned by a single VSI (and not VLAN rules) can be re-added
 * verbatim through the generic per-recipe add path.
 */
6751 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6752 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6753 if (status != ICE_SUCCESS)
6758 /* Add a filter per VSI separately */
/* Shared rules: walk the set bits of vsi_list_info->vsi_map; each
 * bit index is a driver VSI handle that had this rule before reset.
 */
6763 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6765 if (!ice_is_vsi_valid(hw, vsi_handle))
/* Consume the bit so the walk terminates, then rebuild the rule as
 * a plain forward-to-VSI filter for this handle.
 */
6768 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6769 f_entry.fltr_info.vsi_handle = vsi_handle;
6770 f_entry.fltr_info.fwd_id.hw_vsi_id =
6771 ice_get_hw_vsi_num(hw, vsi_handle);
6772 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN rules have a dedicated add helper; everything else goes
 * through the generic per-recipe add path.
 */
6773 if (recp_id == ICE_SW_LKUP_VLAN)
6774 status = ice_add_vlan_internal(hw, &f_entry);
6776 status = ice_add_rule_internal(hw, recp_id,
6778 if (status != ICE_SUCCESS)
6783 /* Clear the filter management list */
6784 ice_rem_sw_rule_info(hw, &l_head);
6789 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6790 * @hw: pointer to the hardware structure
6792 * NOTE: This function does not clean up partially added filters on error.
6793 * It is up to caller of the function to issue a reset or fail early.
/* Iterates all recipe bookkeeping lists and replays each via
 * ice_replay_fltr(). Per the note at the declaration above, partially
 * added filters are NOT cleaned up on error -- the caller must reset.
 * NOTE(review): excerpt is elided -- the declaration of @i, braces and
 * the final return are not visible here.
 */
6795 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6797 struct ice_switch_info *sw = hw->switch_info;
6798 enum ice_status status = ICE_SUCCESS;
6801 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6802 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
/* The recipe-list index doubles as the recipe ID for the replay. */
6804 status = ice_replay_fltr(hw, i, head);
/* Presumably breaks/returns on first failure -- statement elided. */
6805 if (status != ICE_SUCCESS)
6812 * ice_replay_vsi_fltr - Replay filters for requested VSI
6813 * @hw: pointer to the hardware structure
6814 * @vsi_handle: driver VSI handle
6815 * @recp_id: Recipe ID for which rules need to be replayed
6816 * @list_head: list for which filters need to be replayed
6818 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6819 * It is required to pass valid VSI handle.
6821 static enum ice_status
6822 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6823 struct LIST_HEAD_TYPE *list_head)
/* Replays filters of recipe @recp_id that belong to @vsi_handle only.
 * NOTE(review): excerpt is elided -- braces, early returns, the
 * hw_vsi_id declaration and loop continue/exit statements are not
 * visible here.
 */
6825 struct ice_fltr_mgmt_list_entry *itr;
6826 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay for an empty list. */
6829 if (LIST_EMPTY(list_head))
6831 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6833 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6835 struct ice_fltr_list_entry f_entry;
/* Work on a copy: the add path below rewrites fltr_info fields. */
6837 f_entry.fltr_info = itr->fltr_info;
/* Case 1: rule owned by exactly this VSI (no VSI list involved)
 * and not a VLAN rule -- re-add it directly.
 */
6838 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6839 itr->fltr_info.vsi_handle == vsi_handle) {
6840 /* update the src in case it is VSI num */
6841 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6842 f_entry.fltr_info.src = hw_vsi_id;
6843 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6844 if (status != ICE_SUCCESS)
/* Case 2: shared rule -- only relevant when this VSI is a member
 * of the rule's VSI list; otherwise skip the entry.
 */
6848 if (!itr->vsi_list_info ||
6849 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6851 /* Clearing it so that the logic can add it back */
6852 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6853 f_entry.fltr_info.vsi_handle = vsi_handle;
6854 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6855 /* update the src in case it is VSI num */
6856 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6857 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN rules use a dedicated helper; all other recipes go through
 * the generic add path.
 */
6858 if (recp_id == ICE_SW_LKUP_VLAN)
6859 status = ice_add_vlan_internal(hw, &f_entry);
6861 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6862 if (status != ICE_SUCCESS)
6870 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6871 * @hw: pointer to the hardware structure
6872 * @vsi_handle: driver VSI handle
6873 * @list_head: list for which filters need to be replayed
6875 * Replay the advanced rule for the given VSI.
6877 static enum ice_status
6878 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6879 struct LIST_HEAD_TYPE *list_head)
/* Replays advanced rules on @list_head that target @vsi_handle.
 * NOTE(review): excerpt is elided -- braces, the early return after
 * LIST_EMPTY, the loop continue, and the trailing arguments of the
 * ice_add_adv_rule() call are not visible here.
 */
6881 struct ice_rule_query_data added_entry = { 0 };
6882 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6883 enum ice_status status = ICE_SUCCESS;
6885 if (LIST_EMPTY(list_head))
6887 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6889 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6890 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules whose switch action targeted this VSI are replayed. */
6892 if (vsi_handle != rinfo->sw_act.vsi_handle)
/* Re-program the rule from its stored lookups and rule info;
 * added_entry presumably receives the new rule IDs as the (elided)
 * final argument -- TODO confirm against the full source.
 */
6894 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6903 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6904 * @hw: pointer to the hardware structure
6905 * @vsi_handle: driver VSI handle
6907 * Replays filters for requested VSI via vsi_handle.
/* Top-level per-VSI replay: walks every recipe's filt_replay_rules list
 * and dispatches to the legacy or advanced replay helper.
 * NOTE(review): excerpt is elided -- the declaration of @i, braces and
 * the final return are not visible here.
 */
6909 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6911 struct ice_switch_info *sw = hw->switch_info;
6912 enum ice_status status;
6915 /* Update the recipes that were created */
6916 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6917 struct LIST_HEAD_TYPE *head;
6919 head = &sw->recp_list[i].filt_replay_rules;
/* Recipes flagged adv_rule are replayed through the advanced-rule
 * path; all others through the per-recipe filter replay.
 */
6920 if (!sw->recp_list[i].adv_rule)
6921 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6923 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Presumably stops on first failure -- statement elided. */
6924 if (status != ICE_SUCCESS)
6932 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6933 * @hw: pointer to the HW struct
6935 * Deletes the filter replay rules.
6937 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6939 struct ice_switch_info *sw = hw->switch_info;
6945 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6946 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6947 struct LIST_HEAD_TYPE *l_head;
6949 l_head = &sw->recp_list[i].filt_replay_rules;
6950 if (!sw->recp_list[i].adv_rule)
6951 ice_rem_sw_rule_info(hw, l_head);
6953 ice_rem_adv_rule_info(hw, l_head);