1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
/* Byte offsets of fields inside the dummy Ethernet header defined below;
 * used when patching DA/ethertype/VLAN-TCI into a switch rule buffer.
 */
9 #define ICE_ETH_DA_OFFSET 0
10 #define ICE_ETH_ETHTYPE_OFFSET 12
11 #define ICE_ETH_VLAN_TCI_OFFSET 14
/* VLAN ID is a 12-bit field, so 0xFFF is the largest legal value */
12 #define ICE_MAX_VLAN_ID 0xFFF
14 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
15 * struct to configure any switch filter rules.
16 * {DA (6 bytes), SA(6 bytes),
17 * Ether type (2 bytes for header without VLAN tag) OR
18 * VLAN tag (4 bytes for header with VLAN tag) }
20 * Word on Hardcoded values
21 * byte 0 = 0x2: to identify it as locally administered DA MAC
22 * byte 6 = 0x2: to identify it as locally administered SA MAC
23 * byte 12 = 0x81 & byte 13 = 0x00:
24 * In case of VLAN filter first two bytes defines ether type (0x8100)
25 * and remaining two bytes are placeholder for programming a given VLAN ID
26 * In case of Ether type filter it is treated as header without VLAN tag
27 * and byte 12 and 13 is used to program a given Ether type instead
/* Length of the hardcoded dummy Ethernet header (DA + SA + ethertype/VLAN)
 * placed into ice_aqc_sw_rules_elem when programming switch filter rules.
 */
29 #define DUMMY_ETH_HDR_LEN 16
/* NOTE(review): the initializer below is truncated in this view; the full
 * table must supply DUMMY_ETH_HDR_LEN bytes — confirm against upstream.
 */
30 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/* Size helpers for admin-queue switch rule buffers. Each computes the size
 * of an ice_aqc_sw_rules_elem with its generic pdata member replaced by the
 * specific rule payload (the trailing "- 1" compensates for the one-byte
 * flexible placeholder inside the payload struct).
 */
/* lookup RX/TX rule carrying the dummy Ethernet header */
34 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
35 (sizeof(struct ice_aqc_sw_rules_elem) - \
36 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
37 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* lookup RX/TX rule with no packet header attached */
38 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
39 (sizeof(struct ice_aqc_sw_rules_elem) - \
40 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
41 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* large-action rule sized for n entries in its act[] array */
42 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
43 (sizeof(struct ice_aqc_sw_rules_elem) - \
44 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
45 sizeof(struct ice_sw_rule_lg_act) - \
46 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
47 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule sized for n entries in its vsi[] array */
48 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
49 (sizeof(struct ice_aqc_sw_rules_elem) - \
50 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
51 sizeof(struct ice_sw_rule_vsi_list) - \
52 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
53 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps a protocol header type to its byte offset inside a dummy packet;
 * arrays of these are terminated by an ICE_PROTOCOL_LAST entry.
 */
55 struct ice_dummy_pkt_offsets {
56 enum ice_protocol_type type;
57 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
/* Template ("dummy") packets and their per-protocol offset tables. Each
 * packet is a minimal, mostly-zero frame used as a scaffold when building
 * advanced switch filter rules; the matching *_offsets[] table tells the
 * rule-building code where each protocol header starts so match fields can
 * be patched in. NOTE(review): many lines of these tables are elided in
 * this view (gaps in the embedded numbering) — do not hand-edit byte
 * counts without consulting the complete upstream tables.
 */
/* offsets for MAC + IPv4 + NVGRE + inner MAC/IPv4 + TCP */
60 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
63 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
71 static const u8 dummy_gre_tcp_packet[] = {
72 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
73 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00,
76 0x08, 0x00, /* ICE_ETYPE_OL 12 */
78 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
79 0x00, 0x00, 0x00, 0x00,
80 0x00, 0x2F, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00,
84 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
85 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
88 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00,
92 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
93 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x06, 0x00, 0x00,
95 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
99 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00,
101 0x50, 0x02, 0x20, 0x00,
102 0x00, 0x00, 0x00, 0x00
/* offsets for MAC + IPv4 + NVGRE + inner MAC/IPv4 + UDP */
105 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
107 { ICE_ETYPE_OL, 12 },
108 { ICE_IPV4_OFOS, 14 },
112 { ICE_UDP_ILOS, 76 },
113 { ICE_PROTOCOL_LAST, 0 },
116 static const u8 dummy_gre_udp_packet[] = {
117 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
118 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x00, 0x00,
121 0x08, 0x00, /* ICE_ETYPE_OL 12 */
123 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
124 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x2F, 0x00, 0x00,
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
129 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
130 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
137 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
138 0x00, 0x00, 0x00, 0x00,
139 0x00, 0x11, 0x00, 0x00,
140 0x00, 0x00, 0x00, 0x00,
141 0x00, 0x00, 0x00, 0x00,
143 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
144 0x00, 0x08, 0x00, 0x00,
/* offsets for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner TCP */
147 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
149 { ICE_ETYPE_OL, 12 },
150 { ICE_IPV4_OFOS, 14 },
154 { ICE_VXLAN_GPE, 42 },
158 { ICE_PROTOCOL_LAST, 0 },
161 static const u8 dummy_udp_tun_tcp_packet[] = {
162 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x08, 0x00, /* ICE_ETYPE_OL 12 */
168 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
169 0x00, 0x01, 0x00, 0x00,
170 0x40, 0x11, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
175 0x00, 0x46, 0x00, 0x00,
177 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
178 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
181 0x00, 0x00, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
185 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
186 0x00, 0x01, 0x00, 0x00,
187 0x40, 0x06, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00,
191 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
192 0x00, 0x00, 0x00, 0x00,
193 0x00, 0x00, 0x00, 0x00,
194 0x50, 0x02, 0x20, 0x00,
195 0x00, 0x00, 0x00, 0x00
/* offsets for MAC + IPv4 + UDP tunnel (VXLAN/GPE) + inner UDP */
198 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
200 { ICE_ETYPE_OL, 12 },
201 { ICE_IPV4_OFOS, 14 },
205 { ICE_VXLAN_GPE, 42 },
208 { ICE_UDP_ILOS, 84 },
209 { ICE_PROTOCOL_LAST, 0 },
212 static const u8 dummy_udp_tun_udp_packet[] = {
213 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
214 0x00, 0x00, 0x00, 0x00,
215 0x00, 0x00, 0x00, 0x00,
217 0x08, 0x00, /* ICE_ETYPE_OL 12 */
219 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
220 0x00, 0x01, 0x00, 0x00,
221 0x00, 0x11, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
226 0x00, 0x3a, 0x00, 0x00,
228 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
236 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
237 0x00, 0x01, 0x00, 0x00,
238 0x00, 0x11, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
243 0x00, 0x08, 0x00, 0x00,
246 /* offset info for MAC + IPv4 + UDP dummy packet */
247 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
249 { ICE_ETYPE_OL, 12 },
250 { ICE_IPV4_OFOS, 14 },
251 { ICE_UDP_ILOS, 34 },
252 { ICE_PROTOCOL_LAST, 0 },
255 /* Dummy packet for MAC + IPv4 + UDP */
256 static const u8 dummy_udp_packet[] = {
257 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
258 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00,
261 0x08, 0x00, /* ICE_ETYPE_OL 12 */
263 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
264 0x00, 0x01, 0x00, 0x00,
265 0x00, 0x11, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
270 0x00, 0x08, 0x00, 0x00,
272 0x00, 0x00, /* 2 bytes for 4 byte alignment */
275 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
276 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
278 { ICE_ETYPE_OL, 12 },
279 { ICE_VLAN_OFOS, 14 },
280 { ICE_IPV4_OFOS, 18 },
281 { ICE_UDP_ILOS, 38 },
282 { ICE_PROTOCOL_LAST, 0 },
285 /* C-tag (802.1Q), IPv4:UDP dummy packet */
286 static const u8 dummy_vlan_udp_packet[] = {
287 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
288 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00,
291 0x81, 0x00, /* ICE_ETYPE_OL 12 */
293 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
295 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
296 0x00, 0x01, 0x00, 0x00,
297 0x00, 0x11, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00,
299 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
302 0x00, 0x08, 0x00, 0x00,
304 0x00, 0x00, /* 2 bytes for 4 byte alignment */
307 /* offset info for MAC + IPv4 + TCP dummy packet */
308 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
310 { ICE_ETYPE_OL, 12 },
311 { ICE_IPV4_OFOS, 14 },
313 { ICE_PROTOCOL_LAST, 0 },
316 /* Dummy packet for MAC + IPv4 + TCP */
317 static const u8 dummy_tcp_packet[] = {
318 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
319 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00,
322 0x08, 0x00, /* ICE_ETYPE_OL 12 */
324 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
325 0x00, 0x01, 0x00, 0x00,
326 0x00, 0x06, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
331 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00,
333 0x50, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, /* 2 bytes for 4 byte alignment */
339 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
340 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_VLAN_OFOS, 14 },
344 { ICE_IPV4_OFOS, 18 },
346 { ICE_PROTOCOL_LAST, 0 },
349 /* C-tag (802.1Q), IPv4:TCP dummy packet */
350 static const u8 dummy_vlan_tcp_packet[] = {
351 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
352 0x00, 0x00, 0x00, 0x00,
353 0x00, 0x00, 0x00, 0x00,
355 0x81, 0x00, /* ICE_ETYPE_OL 12 */
357 0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
359 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
360 0x00, 0x01, 0x00, 0x00,
361 0x00, 0x06, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
366 0x00, 0x00, 0x00, 0x00,
367 0x00, 0x00, 0x00, 0x00,
368 0x50, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offsets for MAC + IPv6 + TCP */
374 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
376 { ICE_ETYPE_OL, 12 },
377 { ICE_IPV6_OFOS, 14 },
379 { ICE_PROTOCOL_LAST, 0 },
382 static const u8 dummy_tcp_ipv6_packet[] = {
383 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
384 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00,
387 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
389 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
390 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
391 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00,
393 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00,
395 0x00, 0x00, 0x00, 0x00,
396 0x00, 0x00, 0x00, 0x00,
397 0x00, 0x00, 0x00, 0x00,
398 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
401 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00,
403 0x50, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, /* 2 bytes for 4 byte alignment */
409 /* C-tag (802.1Q): IPv6 + TCP */
410 static const struct ice_dummy_pkt_offsets
411 dummy_vlan_tcp_ipv6_packet_offsets[] = {
413 { ICE_ETYPE_OL, 12 },
414 { ICE_VLAN_OFOS, 14 },
415 { ICE_IPV6_OFOS, 18 },
417 { ICE_PROTOCOL_LAST, 0 },
420 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
421 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
422 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
423 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00,
426 0x81, 0x00, /* ICE_ETYPE_OL 12 */
428 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
430 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
431 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
439 0x00, 0x00, 0x00, 0x00,
441 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
442 0x00, 0x00, 0x00, 0x00,
443 0x00, 0x00, 0x00, 0x00,
444 0x50, 0x00, 0x00, 0x00,
445 0x00, 0x00, 0x00, 0x00,
447 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offsets for MAC + IPv6 + UDP */
451 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
453 { ICE_ETYPE_OL, 12 },
454 { ICE_IPV6_OFOS, 14 },
455 { ICE_UDP_ILOS, 54 },
456 { ICE_PROTOCOL_LAST, 0 },
459 /* IPv6 + UDP dummy packet */
460 static const u8 dummy_udp_ipv6_packet[] = {
461 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
462 0x00, 0x00, 0x00, 0x00,
463 0x00, 0x00, 0x00, 0x00,
465 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
467 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
468 0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
469 0x00, 0x00, 0x00, 0x00,
470 0x00, 0x00, 0x00, 0x00,
471 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
479 0x00, 0x08, 0x00, 0x00,
481 0x00, 0x00, /* 2 bytes for 4 byte alignment */
484 /* C-tag (802.1Q): IPv6 + UDP */
485 static const struct ice_dummy_pkt_offsets
486 dummy_vlan_udp_ipv6_packet_offsets[] = {
488 { ICE_ETYPE_OL, 12 },
489 { ICE_VLAN_OFOS, 14 },
490 { ICE_IPV6_OFOS, 18 },
491 { ICE_UDP_ILOS, 58 },
492 { ICE_PROTOCOL_LAST, 0 },
495 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
496 static const u8 dummy_vlan_udp_ipv6_packet[] = {
497 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
498 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00,
501 0x81, 0x00, /* ICE_ETYPE_OL 12 */
503 0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
505 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
506 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
507 0x00, 0x00, 0x00, 0x00,
508 0x00, 0x00, 0x00, 0x00,
509 0x00, 0x00, 0x00, 0x00,
510 0x00, 0x00, 0x00, 0x00,
511 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00,
513 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00,
516 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
517 0x00, 0x08, 0x00, 0x00,
519 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* offsets for MAC + IPv4 + UDP + GTP-U */
522 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
524 { ICE_IPV4_OFOS, 14 },
527 { ICE_PROTOCOL_LAST, 0 },
530 static const u8 dummy_udp_gtp_packet[] = {
531 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
532 0x00, 0x00, 0x00, 0x00,
533 0x00, 0x00, 0x00, 0x00,
536 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
537 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x11, 0x00, 0x00,
539 0x00, 0x00, 0x00, 0x00,
540 0x00, 0x00, 0x00, 0x00,
542 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
543 0x00, 0x1c, 0x00, 0x00,
545 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
546 0x00, 0x00, 0x00, 0x00,
547 0x00, 0x00, 0x00, 0x85,
549 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
550 0x00, 0x00, 0x00, 0x00,
/* offsets for MAC + VLAN + PPPoE */
553 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
555 { ICE_ETYPE_OL, 12 },
556 { ICE_VLAN_OFOS, 14},
558 { ICE_PROTOCOL_LAST, 0 },
561 static const u8 dummy_pppoe_packet[] = {
562 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
563 0x00, 0x00, 0x00, 0x00,
564 0x00, 0x00, 0x00, 0x00,
566 0x81, 0x00, /* ICE_ETYPE_OL 12 */
568 0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
570 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
573 0x00, 0x21, /* PPP Link Layer 24 */
575 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
576 0x00, 0x00, 0x00, 0x00,
577 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00,
579 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* File-scope caches of the FW recipe<->profile association, refreshed by
 * ice_get_recp_to_prof_map(). NOTE(review): these are global (not per-hw)
 * bitmaps — confirm single-device assumptions before reuse.
 */
584 /* this is a recipe to profile association bitmap */
585 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
586 ICE_MAX_NUM_PROFILES);
588 /* this is a profile to recipe association bitmap */
589 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
590 ICE_MAX_NUM_RECIPES);
/* forward declaration: defined later in this file */
592 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
595 * ice_collect_result_idx - copy result index values
596 * @buf: buffer that contains the result index
597 * @recp: the recipe struct to copy data into
599 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
600 struct ice_sw_recipe *recp)
/* If the result-enable flag is set, record the raw index (flag stripped)
 * in the recipe's result-index bitmap.
 */
602 if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
603 ice_set_bit(buf->content.result_indx &
604 ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
608 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
609 * @hw: pointer to hardware structure
610 * @recps: struct that we need to populate
611 * @rid: recipe ID that we are populating
612 * @refresh_required: true if we should get recipe to profile mapping from FW
614 * This function is used to populate all the necessary entries into our
615 * bookkeeping so that we have a current list of all the recipes that are
616 * programmed in the firmware.
618 static enum ice_status
619 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
620 bool *refresh_required)
622 ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
623 struct ice_aqc_recipe_data_elem *tmp;
624 u16 num_recps = ICE_MAX_NUM_RECIPES;
625 struct ice_prot_lkup_ext *lkup_exts;
626 u16 i, sub_recps, fv_word_idx = 0;
627 enum ice_status status;
629 ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
631 /* we need a buffer big enough to accommodate all the recipes */
632 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
633 ICE_MAX_NUM_RECIPES, sizeof(*tmp))
/* NOTE(review): the allocation-failure check preceding this return is
 * elided in this view.
 */
635 return ICE_ERR_NO_MEMORY;
637 tmp[0].recipe_indx = rid;
638 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
639 /* non-zero status meaning recipe doesn't exist */
643 /* Get recipe to profile map so that we can get the fv from lkups that
644 * we read for a recipe from FW. Since we want to minimize the number of
645 * times we make this FW call, just make one call and cache the copy
646 * until a new recipe is added. This operation is only required the
647 * first time to get the changes from FW. Then to search existing
648 * entries we don't need to update the cache again until another recipe
651 if (*refresh_required) {
652 ice_get_recp_to_prof_map(hw);
653 *refresh_required = false;
656 /* Start populating all the entries for recps[rid] based on lkups from
657 * firmware. Note that we are only creating the root recipe in our
660 lkup_exts = &recps[rid].lkup_exts;
662 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
663 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
664 struct ice_recp_grp_entry *rg_entry;
665 u8 prof, idx, prot = 0;
669 rg_entry = (struct ice_recp_grp_entry *)
670 ice_malloc(hw, sizeof(*rg_entry))
672 status = ICE_ERR_NO_MEMORY;
676 idx = root_bufs.recipe_indx;
677 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
679 /* Mark all result indices in this chain */
680 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
681 ice_set_bit(root_bufs.content.result_indx &
682 ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
684 /* get the first profile that is associated with rid */
685 prof = ice_find_first_bit(recipe_to_profile[idx],
686 ICE_MAX_NUM_PROFILES);
687 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
688 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
690 rg_entry->fv_idx[i] = lkup_indx;
691 rg_entry->fv_mask[i] =
692 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
694 /* If the recipe is a chained recipe then all its
695 * child recipe's result will have a result index.
696 * To fill fv_words we should not use those result
697 * index, we only need the protocol ids and offsets.
698 * We will skip all the fv_idx which stores result
699 * index in them. We also need to skip any fv_idx which
700 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
701 * valid offset value.
703 if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
704 rg_entry->fv_idx[i]) ||
705 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
706 rg_entry->fv_idx[i] == 0)
/* resolve the field-vector index to a protocol ID + offset pair */
709 ice_find_prot_off(hw, ICE_BLK_SW, prof,
710 rg_entry->fv_idx[i], &prot, &off);
711 lkup_exts->fv_words[fv_word_idx].prot_id = prot;
712 lkup_exts->fv_words[fv_word_idx].off = off;
715 /* populate rg_list with the data from the child entry of this
718 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
720 /* Propagate some data to the recipe database */
721 recps[idx].is_root = is_root;
722 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
723 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
724 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
725 recps[idx].chain_idx = root_bufs.content.result_indx &
726 ~ICE_AQ_RECIPE_RESULT_EN;
727 ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
/* no result index enabled for this entry */
729 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
735 /* Only do the following for root recipes entries */
736 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
737 sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
738 recps[idx].root_rid = root_bufs.content.rid &
739 ~ICE_AQ_RECIPE_ID_IS_ROOT;
740 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
743 /* Complete initialization of the root recipe entry */
744 lkup_exts->n_val_words = fv_word_idx;
745 recps[rid].big_recp = (num_recps > 1);
746 recps[rid].n_grp_count = num_recps;
747 recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
748 ice_memdup(hw, tmp, recps[rid].n_grp_count *
749 sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
750 if (!recps[rid].root_buf)
753 /* Copy result indexes */
754 ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
755 recps[rid].recp_created = true;
763 * ice_get_recp_to_prof_map - updates recipe to profile mapping
764 * @hw: pointer to hardware structure
766 * This function is used to populate recipe_to_profile matrix where index to
767 * this array is the recipe ID and the element is the mapping of which profiles
768 * is this recipe mapped to.
771 ice_get_recp_to_prof_map(struct ice_hw *hw)
773 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Query FW once per profile; build both forward and reverse maps */
776 for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
779 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
780 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* skip profiles the FW query fails for (continue to next) */
781 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
783 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
784 ICE_MAX_NUM_RECIPES);
/* set the reverse recipe->profile association bit by bit */
785 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
786 if (ice_is_bit_set(r_bitmap, j))
787 ice_set_bit(i, recipe_to_profile[j]);
792 * ice_init_def_sw_recp - initialize the recipe book keeping tables
793 * @hw: pointer to the HW struct
795 * Allocate memory for the entire recipe table and initialize the structures/
796 * entries corresponding to basic recipes.
798 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
800 struct ice_sw_recipe *recps;
803 recps = (struct ice_sw_recipe *)
804 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps))
/* NOTE(review): allocation-failure check preceding this return is elided
 * in this view.
 */
806 return ICE_ERR_NO_MEMORY;
/* seed each recipe slot with its own ID, empty lists and a lock */
808 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
809 recps[i].root_rid = i;
810 INIT_LIST_HEAD(&recps[i].filt_rules);
811 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
812 INIT_LIST_HEAD(&recps[i].rg_list);
813 ice_init_lock(&recps[i].filt_rule_lock);
/* ownership of the table transfers to hw->switch_info */
816 hw->switch_info->recp_list = recps;
822 * ice_aq_get_sw_cfg - get switch configuration
823 * @hw: pointer to the hardware structure
824 * @buf: pointer to the result buffer
825 * @buf_size: length of the buffer available for response
826 * @req_desc: pointer to requested descriptor
827 * @num_elems: pointer to number of elements
828 * @cd: pointer to command details structure or NULL
830 * Get switch configuration (0x0200) to be placed in 'buff'.
831 * This admin command returns information such as initial VSI/port number
832 * and switch ID it belongs to.
834 * NOTE: *req_desc is both an input/output parameter.
835 * The caller of this function first calls this function with *request_desc set
836 * to 0. If the response from f/w has *req_desc set to 0, all the switch
837 * configuration information has been returned; if non-zero (meaning not all
838 * the information was returned), the caller should call this function again
839 * with *req_desc set to the previous value returned by f/w to get the
840 * next block of switch configuration information.
842 * *num_elems is output only parameter. This reflects the number of elements
843 * in response buffer. The caller of this function to use *num_elems while
844 * parsing the response buffer.
846 static enum ice_status
847 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
848 u16 buf_size, u16 *req_desc, u16 *num_elems,
849 struct ice_sq_cd *cd)
851 struct ice_aqc_get_sw_cfg *cmd;
852 enum ice_status status;
853 struct ice_aq_desc desc;
855 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
856 cmd = &desc.params.get_sw_conf;
857 cmd->element = CPU_TO_LE16(*req_desc);
859 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* on success, echo back FW's continuation token and element count */
861 *req_desc = LE16_TO_CPU(cmd->element);
862 *num_elems = LE16_TO_CPU(cmd->num_elems);
869 * ice_alloc_sw - allocate resources specific to switch
870 * @hw: pointer to the HW struct
871 * @ena_stats: true to turn on VEB stats
872 * @shared_res: true for shared resource, false for dedicated resource
873 * @sw_id: switch ID returned
874 * @counter_id: VEB counter ID returned
876 * allocates switch resources (SWID and VEB counter) (0x0208)
879 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
882 struct ice_aqc_alloc_free_res_elem *sw_buf;
883 struct ice_aqc_res_elem *sw_ele;
884 enum ice_status status;
887 buf_len = sizeof(*sw_buf);
888 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
889 ice_malloc(hw, buf_len)
/* NOTE(review): allocation-failure check preceding this return is elided
 * in this view.
 */
891 return ICE_ERR_NO_MEMORY;
893 /* Prepare buffer for switch ID.
894 * The number of resource entries in buffer is passed as 1 since only a
895 * single switch/VEB instance is allocated, and hence a single sw_id
898 sw_buf->num_elems = CPU_TO_LE16(1);
/* request a SWID, shared or dedicated per caller's choice */
900 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
901 (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
902 ICE_AQC_RES_TYPE_FLAG_DEDICATED));
904 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
905 ice_aqc_opc_alloc_res, NULL);
908 goto ice_alloc_sw_exit;
910 sw_ele = &sw_buf->elem[0];
911 *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
914 /* Prepare buffer for VEB Counter */
915 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
916 struct ice_aqc_alloc_free_res_elem *counter_buf;
917 struct ice_aqc_res_elem *counter_ele;
919 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
920 ice_malloc(hw, buf_len)
922 status = ICE_ERR_NO_MEMORY;
923 goto ice_alloc_sw_exit;
926 /* The number of resource entries in buffer is passed as 1 since
927 * only a single switch/VEB instance is allocated, and hence a
928 * single VEB counter is requested.
930 counter_buf->num_elems = CPU_TO_LE16(1);
931 counter_buf->res_type =
932 CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
933 ICE_AQC_RES_TYPE_FLAG_DEDICATED);
934 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
/* on failure free the counter buffer before bailing out */
938 ice_free(hw, counter_buf);
939 goto ice_alloc_sw_exit;
941 counter_ele = &counter_buf->elem[0];
942 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
943 ice_free(hw, counter_buf);
/* common exit: always release the SWID request buffer */
947 ice_free(hw, sw_buf);
952 * ice_free_sw - free resources specific to switch
953 * @hw: pointer to the HW struct
954 * @sw_id: switch ID returned
955 * @counter_id: VEB counter ID returned
957 * free switch resources (SWID and VEB counter) (0x0209)
959 * NOTE: This function frees multiple resources. It continues
960 * releasing other resources even after it encounters error.
961 * The error code returned is the last error it encountered.
963 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
965 struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
966 enum ice_status status, ret_status;
969 buf_len = sizeof(*sw_buf);
970 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
971 ice_malloc(hw, buf_len)
/* NOTE(review): allocation-failure check preceding this return is elided
 * in this view.
 */
973 return ICE_ERR_NO_MEMORY;
975 /* Prepare buffer to free for switch ID res.
976 * The number of resource entries in buffer is passed as 1 since only a
977 * single switch/VEB instance is freed, and hence a single sw_id
980 sw_buf->num_elems = CPU_TO_LE16(1);
981 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
982 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
984 ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
985 ice_aqc_opc_free_res, NULL);
988 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
990 /* Prepare buffer to free for VEB Counter resource */
991 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
992 ice_malloc(hw, buf_len)
994 ice_free(hw, sw_buf);
995 return ICE_ERR_NO_MEMORY;
998 /* The number of resource entries in buffer is passed as 1 since only a
999 * single switch/VEB instance is freed, and hence a single VEB counter
1002 counter_buf->num_elems = CPU_TO_LE16(1);
1003 counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1004 counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1006 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1007 ice_aqc_opc_free_res, NULL);
1009 ice_debug(hw, ICE_DBG_SW,
1010 "VEB counter resource could not be freed\n");
/* keep last error encountered as the function's return value */
1011 ret_status = status;
1014 ice_free(hw, counter_buf);
1015 ice_free(hw, sw_buf);
/* ice_aq_add_vsi - add a VSI context via admin queue */
1021 * @hw: pointer to the HW struct
1022 * @vsi_ctx: pointer to a VSI context struct
1023 * @cd: pointer to command details structure or NULL
1025 * Add a VSI context to the hardware (0x0210)
1028 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1029 struct ice_sq_cd *cd)
1031 struct ice_aqc_add_update_free_vsi_resp *res;
1032 struct ice_aqc_add_get_update_free_vsi *cmd;
1033 struct ice_aq_desc desc;
1034 enum ice_status status;
1036 cmd = &desc.params.vsi_cmd;
1037 res = &desc.params.add_update_free_vsi_res;
1039 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
/* caller supplies a specific VSI number instead of pool allocation */
1041 if (!vsi_ctx->alloc_from_pool)
1042 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1043 ICE_AQ_VSI_IS_VALID);
1045 cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
/* RD flag: command carries a read/write data buffer */
1047 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1049 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1050 sizeof(vsi_ctx->info), cd);
/* on success, record FW-assigned VSI number and allocation counters */
1053 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1054 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1055 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/* ice_aq_free_vsi - free a VSI context via admin queue */
1063 * @hw: pointer to the HW struct
1064 * @vsi_ctx: pointer to a VSI context struct
1065 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1066 * @cd: pointer to command details structure or NULL
1068 * Free VSI context info from hardware (0x0213)
1071 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1072 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1074 struct ice_aqc_add_update_free_vsi_resp *resp;
1075 struct ice_aqc_add_get_update_free_vsi *cmd;
1076 struct ice_aq_desc desc;
1077 enum ice_status status;
1079 cmd = &desc.params.vsi_cmd;
1080 resp = &desc.params.add_update_free_vsi_res;
1082 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1084 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* set KEEP_ALLOC when the caller wants to retain the allocation */
1086 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1088 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* on success, refresh caller's view of VSI usage counters */
1090 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1091 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/* ice_aq_update_vsi - update a VSI context via admin queue */
1099 * @hw: pointer to the HW struct
1100 * @vsi_ctx: pointer to a VSI context struct
1101 * @cd: pointer to command details structure or NULL
1103 * Update VSI context in the hardware (0x0211)
1106 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1107 struct ice_sq_cd *cd)
1109 struct ice_aqc_add_update_free_vsi_resp *resp;
1110 struct ice_aqc_add_get_update_free_vsi *cmd;
1111 struct ice_aq_desc desc;
1112 enum ice_status status;
1114 cmd = &desc.params.vsi_cmd;
1115 resp = &desc.params.add_update_free_vsi_res;
1117 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1119 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
/* RD flag: command carries a read/write data buffer */
1121 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1123 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1124 sizeof(vsi_ctx->info), cd);
/* on success, refresh caller's view of VSI usage counters */
1127 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1128 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1135 * ice_is_vsi_valid - check whether the VSI is valid or not
1136 * @hw: pointer to the HW struct
1137 * @vsi_handle: VSI handle
1139 * check whether the VSI is valid or not
1141 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
/* valid iff the handle is in range and a context has been saved for it */
1143 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1147 * ice_get_hw_vsi_num - return the HW VSI number
1148 * @hw: pointer to the HW struct
1149 * @vsi_handle: VSI handle
1151 * return the HW VSI number
1152 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1154 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/* unchecked dereference by design — see Caution above */
1156 return hw->vsi_ctx[vsi_handle]->vsi_num;
1160 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1161 * @hw: pointer to the HW struct
1162 * @vsi_handle: VSI handle
1164 * return the VSI context entry for a given VSI handle
1166 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1168 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1172 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1173 * @hw: pointer to the HW struct
1174 * @vsi_handle: VSI handle
1175 * @vsi: VSI context pointer
1177 * save the VSI context entry for a given VSI handle
1180 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1182 hw->vsi_ctx[vsi_handle] = vsi;
1186 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1187 * @hw: pointer to the HW struct
1188 * @vsi_handle: VSI handle
1190 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1192 struct ice_vsi_ctx *vsi;
1195 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1198 ice_for_each_traffic_class(i) {
1199 if (vsi->lan_q_ctx[i]) {
1200 ice_free(hw, vsi->lan_q_ctx[i]);
1201 vsi->lan_q_ctx[i] = NULL;
1207 * ice_clear_vsi_ctx - clear the VSI context entry
1208 * @hw: pointer to the HW struct
1209 * @vsi_handle: VSI handle
1211 * clear the VSI context entry
1213 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1215 struct ice_vsi_ctx *vsi;
1217 vsi = ice_get_vsi_ctx(hw, vsi_handle);
1219 ice_clear_vsi_q_ctx(hw, vsi_handle);
1221 hw->vsi_ctx[vsi_handle] = NULL;
1226 * ice_clear_all_vsi_ctx - clear all the VSI context entries
1227 * @hw: pointer to the HW struct
1229 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1233 for (i = 0; i < ICE_MAX_VSI; i++)
1234 ice_clear_vsi_ctx(hw, i);
1238 * ice_add_vsi - add VSI context to the hardware and VSI handle list
1239 * @hw: pointer to the HW struct
1240 * @vsi_handle: unique VSI handle provided by drivers
1241 * @vsi_ctx: pointer to a VSI context struct
1242 * @cd: pointer to command details structure or NULL
1244 * Add a VSI context to the hardware also add it into the VSI handle list.
1245 * If this function gets called after reset for existing VSIs then update
1246 * with the new HW VSI number in the corresponding VSI handle list entry.
1249 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1250 struct ice_sq_cd *cd)
1252 struct ice_vsi_ctx *tmp_vsi_ctx;
1253 enum ice_status status;
1255 if (vsi_handle >= ICE_MAX_VSI)
1256 return ICE_ERR_PARAM;
1257 status = ice_aq_add_vsi(hw, vsi_ctx, cd);
1260 tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1262 /* Create a new VSI context */
1263 tmp_vsi_ctx = (struct ice_vsi_ctx *)
1264 ice_malloc(hw, sizeof(*tmp_vsi_ctx));
1266 ice_aq_free_vsi(hw, vsi_ctx, false, cd);
1267 return ICE_ERR_NO_MEMORY;
1269 *tmp_vsi_ctx = *vsi_ctx;
1271 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
1273 /* update with new HW VSI num */
1274 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
1281 * ice_free_vsi- free VSI context from hardware and VSI handle list
1282 * @hw: pointer to the HW struct
1283 * @vsi_handle: unique VSI handle
1284 * @vsi_ctx: pointer to a VSI context struct
1285 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1286 * @cd: pointer to command details structure or NULL
1288 * Free VSI context info from hardware as well as from VSI handle list
1291 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1292 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1294 enum ice_status status;
1296 if (!ice_is_vsi_valid(hw, vsi_handle))
1297 return ICE_ERR_PARAM;
1298 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1299 status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1301 ice_clear_vsi_ctx(hw, vsi_handle);
1307 * @hw: pointer to the HW struct
1308 * @vsi_handle: unique VSI handle
1309 * @vsi_ctx: pointer to a VSI context struct
1310 * @cd: pointer to command details structure or NULL
1312 * Update VSI context in the hardware
1315 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1316 struct ice_sq_cd *cd)
1318 if (!ice_is_vsi_valid(hw, vsi_handle))
1319 return ICE_ERR_PARAM;
1320 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1321 return ice_aq_update_vsi(hw, vsi_ctx, cd);
1325 * ice_aq_get_vsi_params
1326 * @hw: pointer to the HW struct
1327 * @vsi_ctx: pointer to a VSI context struct
1328 * @cd: pointer to command details structure or NULL
1330 * Get VSI context info from hardware (0x0212)
1333 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1334 struct ice_sq_cd *cd)
1336 struct ice_aqc_add_get_update_free_vsi *cmd;
1337 struct ice_aqc_get_vsi_resp *resp;
1338 struct ice_aq_desc desc;
1339 enum ice_status status;
1341 cmd = &desc.params.vsi_cmd;
1342 resp = &desc.params.get_vsi_resp;
1344 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
1346 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1348 status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1349 sizeof(vsi_ctx->info), cd);
1351 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
1353 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1354 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1361 * ice_aq_add_update_mir_rule - add/update a mirror rule
1362 * @hw: pointer to the HW struct
1363 * @rule_type: Rule Type
1364 * @dest_vsi: VSI number to which packets will be mirrored
1365 * @count: length of the list
1366 * @mr_buf: buffer for list of mirrored VSI numbers
1367 * @cd: pointer to command details structure or NULL
1370 * Add/Update Mirror Rule (0x260).
1373 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
1374 u16 count, struct ice_mir_rule_buf *mr_buf,
1375 struct ice_sq_cd *cd, u16 *rule_id)
1377 struct ice_aqc_add_update_mir_rule *cmd;
1378 struct ice_aq_desc desc;
1379 enum ice_status status;
1380 __le16 *mr_list = NULL;
1383 switch (rule_type) {
1384 case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
1385 case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
1386 /* Make sure count and mr_buf are set for these rule_types */
1387 if (!(count && mr_buf))
1388 return ICE_ERR_PARAM;
1390 buf_size = count * sizeof(__le16);
1391 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
1393 return ICE_ERR_NO_MEMORY;
1395 case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
1396 case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
1397 /* Make sure count and mr_buf are not set for these
1400 if (count || mr_buf)
1401 return ICE_ERR_PARAM;
1404 ice_debug(hw, ICE_DBG_SW,
1405 "Error due to unsupported rule_type %u\n", rule_type);
1406 return ICE_ERR_OUT_OF_RANGE;
1409 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
1411 /* Pre-process 'mr_buf' items for add/update of virtual port
1412 * ingress/egress mirroring (but not physical port ingress/egress
1418 for (i = 0; i < count; i++) {
1421 id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
1423 /* Validate specified VSI number, make sure it is less
1424 * than ICE_MAX_VSI, if not return with error.
1426 if (id >= ICE_MAX_VSI) {
1427 ice_debug(hw, ICE_DBG_SW,
1428 "Error VSI index (%u) out-of-range\n",
1430 ice_free(hw, mr_list);
1431 return ICE_ERR_OUT_OF_RANGE;
1434 /* add VSI to mirror rule */
1437 CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
1438 else /* remove VSI from mirror rule */
1439 mr_list[i] = CPU_TO_LE16(id);
1443 cmd = &desc.params.add_update_rule;
1444 if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
1445 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
1446 ICE_AQC_RULE_ID_VALID_M);
1447 cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
1448 cmd->num_entries = CPU_TO_LE16(count);
1449 cmd->dest = CPU_TO_LE16(dest_vsi);
1451 status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
1453 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
1455 ice_free(hw, mr_list);
1461 * ice_aq_delete_mir_rule - delete a mirror rule
1462 * @hw: pointer to the HW struct
1463 * @rule_id: Mirror rule ID (to be deleted)
1464 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
1465 * otherwise it is returned to the shared pool
1466 * @cd: pointer to command details structure or NULL
1468 * Delete Mirror Rule (0x261).
1471 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
1472 struct ice_sq_cd *cd)
1474 struct ice_aqc_delete_mir_rule *cmd;
1475 struct ice_aq_desc desc;
1477 /* rule_id should be in the range 0...63 */
1478 if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
1479 return ICE_ERR_OUT_OF_RANGE;
1481 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
1483 cmd = &desc.params.del_rule;
1484 rule_id |= ICE_AQC_RULE_ID_VALID_M;
1485 cmd->rule_id = CPU_TO_LE16(rule_id);
1488 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
1490 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1494 * ice_aq_alloc_free_vsi_list
1495 * @hw: pointer to the HW struct
1496 * @vsi_list_id: VSI list ID returned or used for lookup
1497 * @lkup_type: switch rule filter lookup type
1498 * @opc: switch rules population command type - pass in the command opcode
1500 * allocates or free a VSI list resource
1502 static enum ice_status
1503 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
1504 enum ice_sw_lkup_type lkup_type,
1505 enum ice_adminq_opc opc)
1507 struct ice_aqc_alloc_free_res_elem *sw_buf;
1508 struct ice_aqc_res_elem *vsi_ele;
1509 enum ice_status status;
1512 buf_len = sizeof(*sw_buf);
1513 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
1514 ice_malloc(hw, buf_len);
1516 return ICE_ERR_NO_MEMORY;
1517 sw_buf->num_elems = CPU_TO_LE16(1);
1519 if (lkup_type == ICE_SW_LKUP_MAC ||
1520 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1521 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1522 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1523 lkup_type == ICE_SW_LKUP_PROMISC ||
1524 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1525 lkup_type == ICE_SW_LKUP_LAST) {
1526 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1527 } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1529 CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1531 status = ICE_ERR_PARAM;
1532 goto ice_aq_alloc_free_vsi_list_exit;
1535 if (opc == ice_aqc_opc_free_res)
1536 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1538 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1540 goto ice_aq_alloc_free_vsi_list_exit;
1542 if (opc == ice_aqc_opc_alloc_res) {
1543 vsi_ele = &sw_buf->elem[0];
1544 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1547 ice_aq_alloc_free_vsi_list_exit:
1548 ice_free(hw, sw_buf);
1553 * ice_aq_set_storm_ctrl - Sets storm control configuration
1554 * @hw: pointer to the HW struct
1555 * @bcast_thresh: represents the upper threshold for broadcast storm control
1556 * @mcast_thresh: represents the upper threshold for multicast storm control
1557 * @ctl_bitmask: storm control control knobs
1559 * Sets the storm control configuration (0x0280)
1562 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1565 struct ice_aqc_storm_cfg *cmd;
1566 struct ice_aq_desc desc;
1568 cmd = &desc.params.storm_conf;
1570 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1572 cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1573 cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1574 cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1576 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1580 * ice_aq_get_storm_ctrl - gets storm control configuration
1581 * @hw: pointer to the HW struct
1582 * @bcast_thresh: represents the upper threshold for broadcast storm control
1583 * @mcast_thresh: represents the upper threshold for multicast storm control
1584 * @ctl_bitmask: storm control control knobs
1586 * Gets the storm control configuration (0x0281)
1589 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1592 enum ice_status status;
1593 struct ice_aq_desc desc;
1595 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1597 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1599 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1602 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1605 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1608 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1615 * ice_aq_sw_rules - add/update/remove switch rules
1616 * @hw: pointer to the HW struct
1617 * @rule_list: pointer to switch rule population list
1618 * @rule_list_sz: total size of the rule list in bytes
1619 * @num_rules: number of switch rules in the rule_list
1620 * @opc: switch rules population command type - pass in the command opcode
1621 * @cd: pointer to command details structure or NULL
1623 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1625 static enum ice_status
1626 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1627 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1629 struct ice_aq_desc desc;
1631 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1633 if (opc != ice_aqc_opc_add_sw_rules &&
1634 opc != ice_aqc_opc_update_sw_rules &&
1635 opc != ice_aqc_opc_remove_sw_rules)
1636 return ICE_ERR_PARAM;
1638 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1640 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1641 desc.params.sw_rules.num_rules_fltr_entry_index =
1642 CPU_TO_LE16(num_rules);
1643 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1647 * ice_aq_add_recipe - add switch recipe
1648 * @hw: pointer to the HW struct
1649 * @s_recipe_list: pointer to switch rule population list
1650 * @num_recipes: number of switch recipes in the list
1651 * @cd: pointer to command details structure or NULL
1656 ice_aq_add_recipe(struct ice_hw *hw,
1657 struct ice_aqc_recipe_data_elem *s_recipe_list,
1658 u16 num_recipes, struct ice_sq_cd *cd)
1660 struct ice_aqc_add_get_recipe *cmd;
1661 struct ice_aq_desc desc;
1664 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1665 cmd = &desc.params.add_get_recipe;
1666 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1668 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1669 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1671 buf_size = num_recipes * sizeof(*s_recipe_list);
1673 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1677 * ice_aq_get_recipe - get switch recipe
1678 * @hw: pointer to the HW struct
1679 * @s_recipe_list: pointer to switch rule population list
1680 * @num_recipes: pointer to the number of recipes (input and output)
1681 * @recipe_root: root recipe number of recipe(s) to retrieve
1682 * @cd: pointer to command details structure or NULL
1686 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1687 * On output, *num_recipes will equal the number of entries returned in
1690 * The caller must supply enough space in s_recipe_list to hold all possible
1691 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1694 ice_aq_get_recipe(struct ice_hw *hw,
1695 struct ice_aqc_recipe_data_elem *s_recipe_list,
1696 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1698 struct ice_aqc_add_get_recipe *cmd;
1699 struct ice_aq_desc desc;
1700 enum ice_status status;
1703 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1704 return ICE_ERR_PARAM;
1706 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1707 cmd = &desc.params.add_get_recipe;
1708 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1710 cmd->return_index = CPU_TO_LE16(recipe_root);
1711 cmd->num_sub_recipes = 0;
1713 buf_size = *num_recipes * sizeof(*s_recipe_list);
1715 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1716 /* cppcheck-suppress constArgument */
1717 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1723 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1724 * @hw: pointer to the HW struct
1725 * @profile_id: package profile ID to associate the recipe with
1726 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1727 * @cd: pointer to command details structure or NULL
1728 * Recipe to profile association (0x0291)
1731 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1732 struct ice_sq_cd *cd)
1734 struct ice_aqc_recipe_to_profile *cmd;
1735 struct ice_aq_desc desc;
1737 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1738 cmd = &desc.params.recipe_to_profile;
1739 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1740 cmd->profile_id = CPU_TO_LE16(profile_id);
1741 /* Set the recipe ID bit in the bitmask to let the device know which
1742 * profile we are associating the recipe to
1744 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1745 ICE_NONDMA_TO_NONDMA);
1747 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1751 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1752 * @hw: pointer to the HW struct
1753 * @profile_id: package profile ID to associate the recipe with
1754 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1755 * @cd: pointer to command details structure or NULL
1756 * Associate profile ID with given recipe (0x0293)
1759 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1760 struct ice_sq_cd *cd)
1762 struct ice_aqc_recipe_to_profile *cmd;
1763 struct ice_aq_desc desc;
1764 enum ice_status status;
1766 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1767 cmd = &desc.params.recipe_to_profile;
1768 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1769 cmd->profile_id = CPU_TO_LE16(profile_id);
1771 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1773 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1774 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1780 * ice_alloc_recipe - add recipe resource
1781 * @hw: pointer to the hardware structure
1782 * @rid: recipe ID returned as response to AQ call
1784 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1786 struct ice_aqc_alloc_free_res_elem *sw_buf;
1787 enum ice_status status;
1790 buf_len = sizeof(*sw_buf);
1791 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1793 return ICE_ERR_NO_MEMORY;
1795 sw_buf->num_elems = CPU_TO_LE16(1);
1796 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1797 ICE_AQC_RES_TYPE_S) |
1798 ICE_AQC_RES_TYPE_FLAG_SHARED);
1799 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1800 ice_aqc_opc_alloc_res, NULL);
1802 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1803 ice_free(hw, sw_buf);
1808 /* ice_init_port_info - Initialize port_info with switch configuration data
1809 * @pi: pointer to port_info
1810 * @vsi_port_num: VSI number or port number
1811 * @type: Type of switch element (port or VSI)
1812 * @swid: switch ID of the switch the element is attached to
1813 * @pf_vf_num: PF or VF number
1814 * @is_vf: true if the element is a VF, false otherwise
1817 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1818 u16 swid, u16 pf_vf_num, bool is_vf)
1821 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1822 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1824 pi->pf_vf_num = pf_vf_num;
1826 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1827 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1830 ice_debug(pi->hw, ICE_DBG_SW,
1831 "incorrect VSI/port type received\n");
1836 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1837 * @hw: pointer to the hardware structure
1839 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1841 struct ice_aqc_get_sw_cfg_resp *rbuf;
1842 enum ice_status status;
1843 u16 num_total_ports;
1849 num_total_ports = 1;
1851 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1852 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1855 return ICE_ERR_NO_MEMORY;
1857 /* Multiple calls to ice_aq_get_sw_cfg may be required
1858 * to get all the switch configuration information. The need
1859 * for additional calls is indicated by ice_aq_get_sw_cfg
1860 * writing a non-zero value in req_desc
1863 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1864 &req_desc, &num_elems, NULL);
1869 for (i = 0; i < num_elems; i++) {
1870 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1871 u16 pf_vf_num, swid, vsi_port_num;
1875 ele = rbuf[i].elements;
1876 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1877 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1879 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1880 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1882 swid = LE16_TO_CPU(ele->swid);
1884 if (LE16_TO_CPU(ele->pf_vf_num) &
1885 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1888 type = LE16_TO_CPU(ele->vsi_port_num) >>
1889 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1892 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1893 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
1894 if (j == num_total_ports) {
1895 ice_debug(hw, ICE_DBG_SW,
1896 "more ports than expected\n");
1897 status = ICE_ERR_CFG;
1900 ice_init_port_info(hw->port_info,
1901 vsi_port_num, type, swid,
1909 } while (req_desc && !status);
1912 ice_free(hw, (void *)rbuf);
1917 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1918 * @hw: pointer to the hardware structure
1919 * @fi: filter info structure to fill/update
1921 * This helper function populates the lb_en and lan_en elements of the provided
1922 * ice_fltr_info struct using the switch's type and characteristics of the
1923 * switch rule being configured.
1925 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1929 if ((fi->flag & ICE_FLTR_TX) &&
1930 (fi->fltr_act == ICE_FWD_TO_VSI ||
1931 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1932 fi->fltr_act == ICE_FWD_TO_Q ||
1933 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1934 /* Setting LB for prune actions will result in replicated
1935 * packets to the internal switch that will be dropped.
1937 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1940 /* Set lan_en to TRUE if
1941 * 1. The switch is a VEB AND
1943 * 2.1 The lookup is a directional lookup like ethertype,
1944 * promiscuous, ethertype-MAC, promiscuous-VLAN
1945 * and default-port OR
1946 * 2.2 The lookup is VLAN, OR
1947 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1948 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1952 * The switch is a VEPA.
1954 * In all other cases, the LAN enable has to be set to false.
1957 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1958 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1959 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1960 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1961 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1962 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1963 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1964 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1965 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1966 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1975 * ice_fill_sw_rule - Helper function to fill switch rule structure
1976 * @hw: pointer to the hardware structure
1977 * @f_info: entry containing packet forwarding information
1978 * @s_rule: switch rule structure to be filled in based on mac_entry
1979 * @opc: switch rules population command type - pass in the command opcode
1982 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1983 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1985 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1993 if (opc == ice_aqc_opc_remove_sw_rules) {
1994 s_rule->pdata.lkup_tx_rx.act = 0;
1995 s_rule->pdata.lkup_tx_rx.index =
1996 CPU_TO_LE16(f_info->fltr_rule_id);
1997 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2001 eth_hdr_sz = sizeof(dummy_eth_header);
2002 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2004 /* initialize the ether header with a dummy header */
2005 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2006 ice_fill_sw_info(hw, f_info);
2008 switch (f_info->fltr_act) {
2009 case ICE_FWD_TO_VSI:
2010 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2011 ICE_SINGLE_ACT_VSI_ID_M;
2012 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2013 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2014 ICE_SINGLE_ACT_VALID_BIT;
2016 case ICE_FWD_TO_VSI_LIST:
2017 act |= ICE_SINGLE_ACT_VSI_LIST;
2018 act |= (f_info->fwd_id.vsi_list_id <<
2019 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2020 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2021 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2022 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2023 ICE_SINGLE_ACT_VALID_BIT;
2026 act |= ICE_SINGLE_ACT_TO_Q;
2027 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2028 ICE_SINGLE_ACT_Q_INDEX_M;
2030 case ICE_DROP_PACKET:
2031 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2032 ICE_SINGLE_ACT_VALID_BIT;
2034 case ICE_FWD_TO_QGRP:
2035 q_rgn = f_info->qgrp_size > 0 ?
2036 (u8)ice_ilog2(f_info->qgrp_size) : 0;
2037 act |= ICE_SINGLE_ACT_TO_Q;
2038 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2039 ICE_SINGLE_ACT_Q_INDEX_M;
2040 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2041 ICE_SINGLE_ACT_Q_REGION_M;
2048 act |= ICE_SINGLE_ACT_LB_ENABLE;
2050 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2052 switch (f_info->lkup_type) {
2053 case ICE_SW_LKUP_MAC:
2054 daddr = f_info->l_data.mac.mac_addr;
2056 case ICE_SW_LKUP_VLAN:
2057 vlan_id = f_info->l_data.vlan.vlan_id;
2058 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2059 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2060 act |= ICE_SINGLE_ACT_PRUNE;
2061 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2064 case ICE_SW_LKUP_ETHERTYPE_MAC:
2065 daddr = f_info->l_data.ethertype_mac.mac_addr;
2067 case ICE_SW_LKUP_ETHERTYPE:
2068 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2069 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2071 case ICE_SW_LKUP_MAC_VLAN:
2072 daddr = f_info->l_data.mac_vlan.mac_addr;
2073 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2075 case ICE_SW_LKUP_PROMISC_VLAN:
2076 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2078 case ICE_SW_LKUP_PROMISC:
2079 daddr = f_info->l_data.mac_vlan.mac_addr;
2085 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2086 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2087 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2089 /* Recipe set depending on lookup type */
2090 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2091 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2092 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2095 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2096 ICE_NONDMA_TO_NONDMA);
2098 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2099 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2100 *off = CPU_TO_BE16(vlan_id);
2103 /* Create the switch rule with the final dummy Ethernet header */
2104 if (opc != ice_aqc_opc_update_sw_rules)
2105 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2109 * ice_add_marker_act
2110 * @hw: pointer to the hardware structure
2111 * @m_ent: the management entry for which sw marker needs to be added
2112 * @sw_marker: sw marker to tag the Rx descriptor with
2113 * @l_id: large action resource ID
2115 * Create a large action to hold software marker and update the switch rule
2116 * entry pointed by m_ent with newly created large action
2118 static enum ice_status
2119 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2120 u16 sw_marker, u16 l_id)
2122 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2123 /* For software marker we need 3 large actions
2124 * 1. FWD action: FWD TO VSI or VSI LIST
2125 * 2. GENERIC VALUE action to hold the profile ID
2126 * 3. GENERIC VALUE action to hold the software marker ID
2128 const u16 num_lg_acts = 3;
2129 enum ice_status status;
2135 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2136 return ICE_ERR_PARAM;
2138 /* Create two back-to-back switch rules and submit them to the HW using
2139 * one memory buffer:
2143 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2144 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2145 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2147 return ICE_ERR_NO_MEMORY;
2149 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2151 /* Fill in the first switch rule i.e. large action */
2152 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2153 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2154 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2156 /* First action VSI forwarding or VSI list forwarding depending on how
2159 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2160 m_ent->fltr_info.fwd_id.hw_vsi_id;
2162 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2163 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2164 ICE_LG_ACT_VSI_LIST_ID_M;
2165 if (m_ent->vsi_count > 1)
2166 act |= ICE_LG_ACT_VSI_LIST;
2167 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2169 /* Second action descriptor type */
2170 act = ICE_LG_ACT_GENERIC;
2172 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2173 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2175 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2176 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2178 /* Third action Marker value */
2179 act |= ICE_LG_ACT_GENERIC;
2180 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2181 ICE_LG_ACT_GENERIC_VALUE_M;
2183 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
2185 /* call the fill switch rule to fill the lookup Tx Rx structure */
2186 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2187 ice_aqc_opc_update_sw_rules);
2189 /* Update the action to point to the large action ID */
2190 rx_tx->pdata.lkup_tx_rx.act =
2191 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
2192 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2193 ICE_SINGLE_ACT_PTR_VAL_M));
2195 /* Use the filter rule ID of the previously created rule with single
2196 * act. Once the update happens, hardware will treat this as large
2199 rx_tx->pdata.lkup_tx_rx.index =
2200 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
2202 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2203 ice_aqc_opc_update_sw_rules, NULL);
2205 m_ent->lg_act_idx = l_id;
2206 m_ent->sw_marker_id = sw_marker;
2209 ice_free(hw, lg_act);
2214 * ice_add_counter_act - add/update filter rule with counter action
2215 * @hw: pointer to the hardware structure
2216 * @m_ent: the management entry for which counter needs to be added
2217 * @counter_id: VLAN counter ID returned as part of allocate resource
2218 * @l_id: large action resource ID
/* NOTE(review): builds one buffer holding a large-action element (two
 * actions: forward + statistics counter) followed by the lookup rx/tx
 * element, then submits both in a single ice_aq_sw_rules() update call.
 */
2220 static enum ice_status
2221 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2222 u16 counter_id, u16 l_id)
2224 struct ice_aqc_sw_rules_elem *lg_act;
2225 struct ice_aqc_sw_rules_elem *rx_tx;
2226 enum ice_status status;
2227 /* 2 actions will be added while adding a large action counter */
2228 const int num_acts = 2;
/* Counter actions are only attached to MAC lookup rules here */
2235 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2236 return ICE_ERR_PARAM;
2238 /* Create two back-to-back switch rules and submit them to the HW using
2239 * one memory buffer:
/* Large-action element sits first; lookup element follows it immediately
 * at offset lg_act_size inside the same allocation.
 */
2243 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
2244 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2245 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
2248 return ICE_ERR_NO_MEMORY;
2250 rx_tx = (struct ice_aqc_sw_rules_elem *)
2251 ((u8 *)lg_act + lg_act_size);
2253 /* Fill in the first switch rule i.e. large action */
2254 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2255 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2256 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
2258 /* First action VSI forwarding or VSI list forwarding depending on how
/* Forward to a VSI list when this filter already fans out to >1 VSI,
 * otherwise to the single HW VSI.
 */
2261 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2262 m_ent->fltr_info.fwd_id.hw_vsi_id;
2264 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2265 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2266 ICE_LG_ACT_VSI_LIST_ID_M;
2267 if (m_ent->vsi_count > 1)
2268 act |= ICE_LG_ACT_VSI_LIST;
2269 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2271 /* Second action counter ID */
2272 act = ICE_LG_ACT_STAT_COUNT;
2273 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
2274 ICE_LG_ACT_STAT_COUNT_M;
2275 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2277 /* call the fill switch rule to fill the lookup Tx Rx structure */
2278 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2279 ice_aqc_opc_update_sw_rules);
/* Point the lookup rule's single action at the large action just built */
2281 act = ICE_SINGLE_ACT_PTR;
2282 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
2283 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2285 /* Use the filter rule ID of the previously created rule with single
2286 * act. Once the update happens, hardware will treat this as large
2289 f_rule_id = m_ent->fltr_info.fltr_rule_id;
2290 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
2292 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2293 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keeping: record the large-action index and counter owned by this
 * management entry.
 */
2295 m_ent->lg_act_idx = l_id;
2296 m_ent->counter_index = counter_id;
2299 ice_free(hw, lg_act);
2304 * ice_create_vsi_list_map
2305 * @hw: pointer to the hardware structure
2306 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2307 * @num_vsi: number of VSI handles in the array
2308 * @vsi_list_id: VSI list ID generated as part of allocate resource
2310 * Helper function to create a new entry of VSI list ID to VSI mapping
2311 * using the given VSI list ID
/* NOTE(review): allocates (zeroed) a map entry, records the list ID, sets
 * one bit per VSI handle in the bitmap and links the entry onto
 * sw->vsi_list_map_head. Presumably returns the new entry (NULL on
 * allocation failure) — the return statements are outside this excerpt.
 */
2313 static struct ice_vsi_list_map_info *
2314 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2317 struct ice_switch_info *sw = hw->switch_info;
2318 struct ice_vsi_list_map_info *v_map;
2321 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
2326 v_map->vsi_list_id = vsi_list_id;
/* Mark every supplied VSI handle as a member of this list */
2328 for (i = 0; i < num_vsi; i++)
2329 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
2331 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
2336 * ice_update_vsi_list_rule
2337 * @hw: pointer to the hardware structure
2338 * @vsi_handle_arr: array of VSI handles to form a VSI list
2339 * @num_vsi: number of VSI handles in the array
2340 * @vsi_list_id: VSI list ID generated as part of allocate resource
2341 * @remove: Boolean value to indicate if this is a remove action
2342 * @opc: switch rules population command type - pass in the command opcode
2343 * @lkup_type: lookup type of the filter
2345 * Call AQ command to add a new switch rule or update existing switch rule
2346 * using the given VSI list ID
2348 static enum ice_status
2349 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2350 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2351 enum ice_sw_lkup_type lkup_type)
2353 struct ice_aqc_sw_rules_elem *s_rule;
2354 enum ice_status status;
2360 return ICE_ERR_PARAM;
/* Pick the AQ element type: most lookup types use a plain VSI list
 * (set/clear); VLAN lookups use a prune list instead.
 */
2362 if (lkup_type == ICE_SW_LKUP_MAC ||
2363 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2364 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2365 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2366 lkup_type == ICE_SW_LKUP_PROMISC ||
2367 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2368 lkup_type == ICE_SW_LKUP_LAST)
2369 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2370 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2371 else if (lkup_type == ICE_SW_LKUP_VLAN)
2372 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2373 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2375 return ICE_ERR_PARAM;
/* Rule element size scales with the number of VSIs in the list */
2377 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
2378 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2380 return ICE_ERR_NO_MEMORY;
2381 for (i = 0; i < num_vsi; i++) {
2382 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2383 status = ICE_ERR_PARAM;
2386 /* AQ call requires hw_vsi_id(s) */
2387 s_rule->pdata.vsi_list.vsi[i] =
2388 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]))
2391 s_rule->type = CPU_TO_LE16(type);
2392 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2393 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
/* Single-element submission; caller supplies add vs. update opcode */
2395 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2398 ice_free(hw, s_rule);
2403 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2404 * @hw: pointer to the HW struct
2405 * @vsi_handle_arr: array of VSI handles to form a VSI list
2406 * @num_vsi: number of VSI handles in the array
2407 * @vsi_list_id: stores the ID of the VSI list to be created
2408 * @lkup_type: switch rule filter's lookup type
/* NOTE(review): two-step create — allocate a VSI list resource from FW
 * (fills *vsi_list_id), then populate it with the given VSIs via an
 * add-switch-rules command.
 */
2410 static enum ice_status
2411 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2412 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2414 enum ice_status status;
2416 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2417 ice_aqc_opc_alloc_res);
2421 /* Update the newly created VSI list to include the specified VSIs */
2422 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2423 *vsi_list_id, false,
2424 ice_aqc_opc_add_sw_rules, lkup_type);
2428 * ice_create_pkt_fwd_rule
2429 * @hw: pointer to the hardware structure
2430 * @f_entry: entry containing packet forwarding information
2432 * Create switch rule with given filter information and add an entry
2433 * to the corresponding filter management list to track this switch rule
2436 static enum ice_status
2437 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2438 struct ice_fltr_list_entry *f_entry)
2440 struct ice_fltr_mgmt_list_entry *fm_entry;
2441 struct ice_aqc_sw_rules_elem *s_rule;
2442 enum ice_sw_lkup_type l_type;
2443 struct ice_sw_recipe *recp;
2444 enum ice_status status;
/* s_rule is a scratch AQ buffer; fm_entry is the long-lived book-keeping
 * entry that outlives this function (freed later by rule removal).
 */
2446 s_rule = (struct ice_aqc_sw_rules_elem *)
2447 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2449 return ICE_ERR_NO_MEMORY;
2450 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2451 ice_malloc(hw, sizeof(*fm_entry));
2453 status = ICE_ERR_NO_MEMORY;
2454 goto ice_create_pkt_fwd_rule_exit;
2457 fm_entry->fltr_info = f_entry->fltr_info;
2459 /* Initialize all the fields for the management entry */
2460 fm_entry->vsi_count = 1;
2461 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2462 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2463 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2465 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2466 ice_aqc_opc_add_sw_rules);
2468 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2469 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the tracking entry must not be leaked */
2471 ice_free(hw, fm_entry);
2472 goto ice_create_pkt_fwd_rule_exit;
/* FW returns the assigned rule ID in the response buffer; propagate it
 * to both the caller's entry and the tracking entry.
 */
2475 f_entry->fltr_info.fltr_rule_id =
2476 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2477 fm_entry->fltr_info.fltr_rule_id =
2478 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2480 /* The book keeping entries will get removed when base driver
2481 * calls remove filter AQ command
2483 l_type = fm_entry->fltr_info.lkup_type;
2484 recp = &hw->switch_info->recp_list[l_type];
2485 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2487 ice_create_pkt_fwd_rule_exit:
2488 ice_free(hw, s_rule);
2493 * ice_update_pkt_fwd_rule
2494 * @hw: pointer to the hardware structure
2495 * @f_info: filter information for switch rule
2497 * Call AQ command to update a previously created switch rule with a
/* NOTE(review): rebuilds the rule element from f_info, keys it to the
 * existing fltr_rule_id, and submits one update-switch-rules command.
 */
2500 static enum ice_status
2501 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2503 struct ice_aqc_sw_rules_elem *s_rule;
2504 enum ice_status status;
2506 s_rule = (struct ice_aqc_sw_rules_elem *)
2507 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2509 return ICE_ERR_NO_MEMORY;
2511 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the already-programmed rule by its FW-assigned rule ID */
2513 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2515 /* Update switch rule with new rule set to forward VSI list */
2516 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2517 ice_aqc_opc_update_sw_rules, NULL);
2519 ice_free(hw, s_rule);
2524 * ice_update_sw_rule_bridge_mode
2525 * @hw: pointer to the HW struct
2527 * Updates unicast switch filter rules based on VEB/VEPA mode
2529 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2531 struct ice_switch_info *sw = hw->switch_info;
2532 struct ice_fltr_mgmt_list_entry *fm_entry;
2533 enum ice_status status = ICE_SUCCESS;
2534 struct LIST_HEAD_TYPE *rule_head;
2535 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Only the MAC-lookup recipe's rule list is walked here */
2537 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2538 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2540 ice_acquire_lock(rule_lock);
2541 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2543 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2544 u8 *addr = fi->l_data.mac.mac_addr;
2546 /* Update unicast Tx rules to reflect the selected
/* Only Tx-direction unicast rules with a forwarding action are
 * re-programmed; everything else is left untouched.
 */
2549 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2550 (fi->fltr_act == ICE_FWD_TO_VSI ||
2551 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2552 fi->fltr_act == ICE_FWD_TO_Q ||
2553 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2554 status = ice_update_pkt_fwd_rule(hw, fi);
2560 ice_release_lock(rule_lock);
2566 * ice_add_update_vsi_list
2567 * @hw: pointer to the hardware structure
2568 * @m_entry: pointer to current filter management list entry
2569 * @cur_fltr: filter information from the book keeping entry
2570 * @new_fltr: filter information with the new VSI to be added
2572 * Call AQ command to add or update previously created VSI list with new VSI.
2574 * Helper function to do book keeping associated with adding filter information
2575 * The algorithm to do the book keeping is described below :
2576 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2577 * if only one VSI has been added till now
2578 * Allocate a new VSI list and add two VSIs
2579 * to this list using switch rule command
2580 * Update the previously created switch rule with the
2581 * newly created VSI list ID
2582 * if a VSI list was previously created
2583 * Add the new VSI to the previously created VSI list set
2584 * using the update switch rule command
2586 static enum ice_status
2587 ice_add_update_vsi_list(struct ice_hw *hw,
2588 struct ice_fltr_mgmt_list_entry *m_entry,
2589 struct ice_fltr_info *cur_fltr,
2590 struct ice_fltr_info *new_fltr)
2592 enum ice_status status = ICE_SUCCESS;
2593 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot be merged into a VSI list */
2595 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2596 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2597 return ICE_ERR_NOT_IMPL;
2599 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2600 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2601 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2602 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2603 return ICE_ERR_NOT_IMPL;
2605 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2606 /* Only one entry existed in the mapping and it was not already
2607 * a part of a VSI list. So, create a VSI list with the old and
2610 struct ice_fltr_info tmp_fltr;
2611 u16 vsi_handle_arr[2];
2613 /* A rule already exists with the new VSI being added */
2614 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2615 return ICE_ERR_ALREADY_EXISTS;
/* Seed the new list with the existing VSI plus the new one */
2617 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2618 vsi_handle_arr[1] = new_fltr->vsi_handle;
2619 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2621 new_fltr->lkup_type);
2625 tmp_fltr = *new_fltr;
2626 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2627 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2628 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2629 /* Update the previous switch rule of "MAC forward to VSI" to
2630 * "MAC fwd to VSI list"
2632 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Keep the book-keeping entry in sync with what HW now holds */
2636 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2637 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2638 m_entry->vsi_list_info =
2639 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2642 /* If this entry was large action then the large action needs
2643 * to be updated to point to FWD to VSI list
2645 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2647 ice_add_marker_act(hw, m_entry,
2648 m_entry->sw_marker_id,
2649 m_entry->lg_act_idx);
2651 u16 vsi_handle = new_fltr->vsi_handle;
2652 enum ice_adminq_opc opcode;
2654 if (!m_entry->vsi_list_info)
2657 /* A rule already exists with the new VSI being added */
2658 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2661 /* Update the previously created VSI list set with
2662 * the new VSI ID passed in
2664 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2665 opcode = ice_aqc_opc_update_sw_rules;
2667 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2668 vsi_list_id, false, opcode,
2669 new_fltr->lkup_type);
2670 /* update VSI list mapping info with new VSI ID */
2672 ice_set_bit(vsi_handle,
2673 m_entry->vsi_list_info->vsi_map);
2676 m_entry->vsi_count++;
2681 * ice_find_rule_entry - Search a rule entry
2682 * @hw: pointer to the hardware structure
2683 * @recp_id: lookup type for which the specified rule needs to be searched
2684 * @f_info: rule information
2686 * Helper function to search for a given rule entry
2687 * Returns pointer to entry storing the rule if found
/* NOTE(review): match criterion is byte-equality of the lookup data plus
 * an equal flag field; caller is expected to hold the recipe's rule lock
 * (lock is not taken here) — confirm against callers.
 */
2689 static struct ice_fltr_mgmt_list_entry *
2690 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2692 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2693 struct ice_switch_info *sw = hw->switch_info;
2694 struct LIST_HEAD_TYPE *list_head;
2696 list_head = &sw->recp_list[recp_id].filt_rules;
2697 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2699 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2700 sizeof(f_info->l_data)) &&
2701 f_info->flag == list_itr->fltr_info.flag) {
2710 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2711 * @hw: pointer to the hardware structure
2712 * @recp_id: lookup type for which VSI lists needs to be searched
2713 * @vsi_handle: VSI handle to be found in VSI list
2714 * @vsi_list_id: VSI list ID found containing vsi_handle
2716 * Helper function to search a VSI list with single entry containing given VSI
2717 * handle element. This can be extended further to search VSI list with more
2718 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2720 static struct ice_vsi_list_map_info *
2721 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2724 struct ice_vsi_list_map_info *map_info = NULL;
2725 struct ice_switch_info *sw = hw->switch_info;
2726 struct LIST_HEAD_TYPE *list_head;
2728 list_head = &sw->recp_list[recp_id].filt_rules;
/* Advanced-rule recipes store a different entry type on the same list
 * head, so the two cases iterate with different element types.
 */
2729 if (sw->recp_list[recp_id].adv_rule) {
2730 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2732 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2733 ice_adv_fltr_mgmt_list_entry,
2735 if (list_itr->vsi_list_info) {
2736 map_info = list_itr->vsi_list_info;
2737 if (ice_is_bit_set(map_info->vsi_map,
2739 *vsi_list_id = map_info->vsi_list_id;
2745 struct ice_fltr_mgmt_list_entry *list_itr;
2747 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2748 ice_fltr_mgmt_list_entry,
/* Non-advanced path additionally requires vsi_count == 1, matching
 * the "single entry" contract in the header comment.
 */
2750 if (list_itr->vsi_count == 1 &&
2751 list_itr->vsi_list_info) {
2752 map_info = list_itr->vsi_list_info;
2753 if (ice_is_bit_set(map_info->vsi_map,
2755 *vsi_list_id = map_info->vsi_list_id;
2765 * ice_add_rule_internal - add rule for a given lookup type
2766 * @hw: pointer to the hardware structure
2767 * @recp_id: lookup type (recipe ID) for which rule has to be added
2768 * @f_entry: structure containing MAC forwarding information
2770 * Adds or updates the rule lists for a given recipe
2772 static enum ice_status
2773 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2774 struct ice_fltr_list_entry *f_entry)
2776 struct ice_switch_info *sw = hw->switch_info;
2777 struct ice_fltr_info *new_fltr, *cur_fltr;
2778 struct ice_fltr_mgmt_list_entry *m_entry;
2779 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2780 enum ice_status status = ICE_SUCCESS;
2782 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2783 return ICE_ERR_PARAM;
2785 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2786 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2787 f_entry->fltr_info.fwd_id.hw_vsi_id =
2788 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2790 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2792 ice_acquire_lock(rule_lock);
2793 new_fltr = &f_entry->fltr_info;
/* Source field depends on direction: Rx rules match on the port,
 * Tx rules match on the originating HW VSI number.
 */
2794 if (new_fltr->flag & ICE_FLTR_RX)
2795 new_fltr->src = hw->port_info->lport;
2796 else if (new_fltr->flag & ICE_FLTR_TX)
2798 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* New filter: program a fresh rule. Existing filter: fold the new VSI
 * into the existing rule's VSI (list) via ice_add_update_vsi_list().
 */
2800 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2802 status = ice_create_pkt_fwd_rule(hw, f_entry);
2803 goto exit_add_rule_internal;
2806 cur_fltr = &m_entry->fltr_info;
2807 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2809 exit_add_rule_internal:
2810 ice_release_lock(rule_lock);
2815 * ice_remove_vsi_list_rule
2816 * @hw: pointer to the hardware structure
2817 * @vsi_list_id: VSI list ID generated as part of allocate resource
2818 * @lkup_type: switch rule filter lookup type
2820 * The VSI list should be emptied before this function is called to remove the
2823 static enum ice_status
2824 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2825 enum ice_sw_lkup_type lkup_type)
2827 struct ice_aqc_sw_rules_elem *s_rule;
2828 enum ice_status status;
/* Zero-VSI element: only the list index is meaningful for a clear */
2831 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2832 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2834 return ICE_ERR_NO_MEMORY;
2836 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2837 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2839 /* Free the vsi_list resource that we allocated. It is assumed that the
2840 * list is empty at this point.
2842 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2843 ice_aqc_opc_free_res);
2845 ice_free(hw, s_rule);
2850 * ice_rem_update_vsi_list
2851 * @hw: pointer to the hardware structure
2852 * @vsi_handle: VSI handle of the VSI to remove
2853 * @fm_list: filter management entry for which the VSI list management needs to
/* NOTE(review): removes one VSI from a VSI-list-backed rule. If the list
 * collapses to a single VSI (non-VLAN) the rule is converted back to a
 * plain FWD_TO_VSI rule and the now-unused VSI list is freed.
 */
2856 static enum ice_status
2857 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2858 struct ice_fltr_mgmt_list_entry *fm_list)
2860 enum ice_sw_lkup_type lkup_type;
2861 enum ice_status status = ICE_SUCCESS;
2864 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2865 fm_list->vsi_count == 0)
2866 return ICE_ERR_PARAM;
2868 /* A rule with the VSI being removed does not exist */
2869 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2870 return ICE_ERR_DOES_NOT_EXIST;
2872 lkup_type = fm_list->fltr_info.lkup_type;
2873 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* remove=true: clear this VSI out of the HW VSI list */
2874 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2875 ice_aqc_opc_update_sw_rules,
2880 fm_list->vsi_count--;
2881 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
2883 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2884 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2885 struct ice_vsi_list_map_info *vsi_list_info =
2886 fm_list->vsi_list_info;
/* Identify the lone remaining VSI via its bitmap position */
2889 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2891 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2892 return ICE_ERR_OUT_OF_RANGE;
2894 /* Make sure VSI list is empty before removing it below */
2895 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2897 ice_aqc_opc_update_sw_rules,
/* Re-point the rule at the single remaining VSI directly */
2902 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2903 tmp_fltr_info.fwd_id.hw_vsi_id =
2904 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2905 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2906 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2908 ice_debug(hw, ICE_DBG_SW,
2909 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2910 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2914 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are torn down when empty; other types when one VSI left
 * (it was already converted to FWD_TO_VSI above).
 */
2917 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2918 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2919 struct ice_vsi_list_map_info *vsi_list_info =
2920 fm_list->vsi_list_info;
2922 /* Remove the VSI list since it is no longer used */
2923 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2925 ice_debug(hw, ICE_DBG_SW,
2926 "Failed to remove VSI list %d, error %d\n",
2927 vsi_list_id, status);
2931 LIST_DEL(&vsi_list_info->list_entry);
2932 ice_free(hw, vsi_list_info);
2933 fm_list->vsi_list_info = NULL;
2940 * ice_remove_rule_internal - Remove a filter rule of a given type
2942 * @hw: pointer to the hardware structure
2943 * @recp_id: recipe ID for which the rule needs to removed
2944 * @f_entry: rule entry containing filter information
2946 static enum ice_status
2947 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2948 struct ice_fltr_list_entry *f_entry)
2950 struct ice_switch_info *sw = hw->switch_info;
2951 struct ice_fltr_mgmt_list_entry *list_elem;
2952 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2953 enum ice_status status = ICE_SUCCESS;
2954 bool remove_rule = false;
2957 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2958 return ICE_ERR_PARAM;
2959 f_entry->fltr_info.fwd_id.hw_vsi_id =
2960 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2962 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2963 ice_acquire_lock(rule_lock);
2964 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2966 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide between deleting the whole rule and shrinking its VSI list */
2970 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2972 } else if (!list_elem->vsi_list_info) {
2973 status = ICE_ERR_DOES_NOT_EXIST;
2975 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2976 /* a ref_cnt > 1 indicates that the vsi_list is being
2977 * shared by multiple rules. Decrement the ref_cnt and
2978 * remove this rule, but do not modify the list, as it
2979 * is in-use by other rules.
2981 list_elem->vsi_list_info->ref_cnt--;
2984 /* a ref_cnt of 1 indicates the vsi_list is only used
2985 * by one rule. However, the original removal request is only
2986 * for a single VSI. Update the vsi_list first, and only
2987 * remove the rule if there are no further VSIs in this list.
2989 vsi_handle = f_entry->fltr_info.vsi_handle;
2990 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2993 /* if VSI count goes to zero after updating the VSI list */
2994 if (list_elem->vsi_count == 0)
2999 /* Remove the lookup rule */
3000 struct ice_aqc_sw_rules_elem *s_rule;
3002 s_rule = (struct ice_aqc_sw_rules_elem *)
3003 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE)
3005 status = ICE_ERR_NO_MEMORY;
3009 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3010 ice_aqc_opc_remove_sw_rules);
3012 status = ice_aq_sw_rules(hw, s_rule,
3013 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3014 ice_aqc_opc_remove_sw_rules, NULL);
3016 /* Remove a book keeping from the list */
3017 ice_free(hw, s_rule);
/* Drop and free the tracking entry once the HW rule is gone */
3022 LIST_DEL(&list_elem->list_entry);
3023 ice_free(hw, list_elem);
3026 ice_release_lock(rule_lock);
3031 * ice_aq_get_res_alloc - get allocated resources
3032 * @hw: pointer to the HW struct
3033 * @num_entries: pointer to u16 to store the number of resource entries returned
3034 * @buf: pointer to user-supplied buffer
3035 * @buf_size: size of buff
3036 * @cd: pointer to command details structure or NULL
3038 * The user-supplied buffer must be large enough to store the resource
3039 * information for all resource types. Each resource type is an
3040 * ice_aqc_get_res_resp_data_elem structure.
3043 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
3044 u16 buf_size, struct ice_sq_cd *cd)
3046 struct ice_aqc_get_res_alloc *resp;
3047 enum ice_status status;
3048 struct ice_aq_desc desc;
3051 return ICE_ERR_BAD_PTR;
3053 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3054 return ICE_ERR_INVAL_SIZE;
3056 resp = &desc.params.get_res;
3058 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3059 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; only populated on a successful command */
3061 if (!status && num_entries)
3062 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3068 * ice_aq_get_res_descs - get allocated resource descriptors
3069 * @hw: pointer to the hardware structure
3070 * @num_entries: number of resource entries in buffer
3071 * @buf: Indirect buffer to hold data parameters and response
3072 * @buf_size: size of buffer for indirect commands
3073 * @res_type: resource type
3074 * @res_shared: is resource shared
3075 * @desc_id: input - first desc ID to start; output - next desc ID
3076 * @cd: pointer to command details structure or NULL
3079 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3080 struct ice_aqc_get_allocd_res_desc_resp *buf,
3081 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
3082 struct ice_sq_cd *cd)
3084 struct ice_aqc_get_allocd_res_desc *cmd;
3085 struct ice_aq_desc desc;
3086 enum ice_status status;
3088 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3090 cmd = &desc.params.get_res_desc;
3093 return ICE_ERR_PARAM;
/* Buffer must be exactly one response element per requested entry */
3095 if (buf_size != (num_entries * sizeof(*buf)))
3096 return ICE_ERR_PARAM;
3098 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
/* Encode resource type and shared flag into the command's res field */
3100 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3101 ICE_AQC_RES_TYPE_M) | (res_shared ?
3102 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3103 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3105 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is updated in place so the caller can continue paging */
3107 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3113 * ice_add_mac - Add a MAC address based filter rule
3114 * @hw: pointer to the hardware structure
3115 * @m_list: list of MAC addresses and forwarding information
3117 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3118 * multiple unicast addresses, the function assumes that all the
3119 * addresses are unique in a given add_mac call. It doesn't
3120 * check for duplicates in this case, removing duplicates from a given
3121 * list should be taken care of in the caller of this function.
/* NOTE(review): three passes over m_list — (1) validate each entry and
 * handle multicast/shared-unicast via ice_add_rule_internal(); (2) bulk-
 * program the remaining exclusive unicast rules via chunked
 * ice_aq_sw_rules() calls; (3) read back FW rule IDs and create one
 * book-keeping entry per programmed unicast rule.
 */
3124 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3126 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3127 struct ice_fltr_list_entry *m_list_itr;
3128 struct LIST_HEAD_TYPE *rule_head;
3129 u16 elem_sent, total_elem_left;
3130 struct ice_switch_info *sw;
3131 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3132 enum ice_status status = ICE_SUCCESS;
3133 u16 num_unicast = 0;
3137 return ICE_ERR_PARAM;
3139 sw = hw->switch_info;
3140 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3141 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3143 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
/* All MAC rules added here are Tx-direction, sourced from the VSI */
3147 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3148 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3149 if (!ice_is_vsi_valid(hw, vsi_handle))
3150 return ICE_ERR_PARAM;
3151 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3152 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3153 /* update the src in case it is VSI num */
3154 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3155 return ICE_ERR_PARAM;
3156 m_list_itr->fltr_info.src = hw_vsi_id;
3157 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3158 IS_ZERO_ETHER_ADDR(add))
3159 return ICE_ERR_PARAM;
3160 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3161 /* Don't overwrite the unicast address */
3162 ice_acquire_lock(rule_lock);
3163 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
3164 &m_list_itr->fltr_info)) {
3165 ice_release_lock(rule_lock);
3166 return ICE_ERR_ALREADY_EXISTS;
3168 ice_release_lock(rule_lock);
3170 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3171 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
/* Shared/multicast addresses go through the generic per-rule
 * path immediately rather than the bulk unicast path below.
 */
3172 m_list_itr->status =
3173 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3175 if (m_list_itr->status)
3176 return m_list_itr->status;
3180 ice_acquire_lock(rule_lock);
3181 /* Exit if no suitable entries were found for adding bulk switch rule */
3183 status = ICE_SUCCESS;
3184 goto ice_add_mac_exit;
3187 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3189 /* Allocate switch rule buffer for the bulk update for unicast */
3190 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3191 s_rule = (struct ice_aqc_sw_rules_elem *)
3192 ice_calloc(hw, num_unicast, s_rule_size);
3194 status = ICE_ERR_NO_MEMORY;
3195 goto ice_add_mac_exit;
/* Pass 2: fill one rule element per exclusive unicast address */
3199 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3201 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3202 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3204 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3205 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
3206 ice_aqc_opc_add_sw_rules);
3207 r_iter = (struct ice_aqc_sw_rules_elem *)
3208 ((u8 *)r_iter + s_rule_size);
3212 /* Call AQ bulk switch rule update for all unicast addresses */
3214 /* Call AQ switch rule in AQ_MAX chunk */
/* Submit in chunks bounded by the AQ's max indirect buffer size */
3215 for (total_elem_left = num_unicast; total_elem_left > 0;
3216 total_elem_left -= elem_sent) {
3217 struct ice_aqc_sw_rules_elem *entry = r_iter;
3219 elem_sent = min(total_elem_left,
3220 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
3221 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
3222 elem_sent, ice_aqc_opc_add_sw_rules,
3225 goto ice_add_mac_exit;
3226 r_iter = (struct ice_aqc_sw_rules_elem *)
3227 ((u8 *)r_iter + (elem_sent * s_rule_size));
3230 /* Fill up rule ID based on the value returned from FW */
3232 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3234 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
3235 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3236 struct ice_fltr_mgmt_list_entry *fm_entry;
3238 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
3239 f_info->fltr_rule_id =
3240 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
3241 f_info->fltr_act = ICE_FWD_TO_VSI;
3242 /* Create an entry to track this MAC address */
3243 fm_entry = (struct ice_fltr_mgmt_list_entry *)
3244 ice_malloc(hw, sizeof(*fm_entry));
3246 status = ICE_ERR_NO_MEMORY;
3247 goto ice_add_mac_exit;
3249 fm_entry->fltr_info = *f_info;
3250 fm_entry->vsi_count = 1;
3251 /* The book keeping entries will get removed when
3252 * base driver calls remove filter AQ command
3255 LIST_ADD(&fm_entry->list_entry, rule_head);
3256 r_iter = (struct ice_aqc_sw_rules_elem *)
3257 ((u8 *)r_iter + s_rule_size);
3262 ice_release_lock(rule_lock);
3264 ice_free(hw, s_rule);
3269 * ice_add_vlan_internal - Add one VLAN based filter rule
3270 * @hw: pointer to the hardware structure
3271 * @f_entry: filter entry containing one VLAN information
3273 static enum ice_status
3274 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
3276 struct ice_switch_info *sw = hw->switch_info;
3277 struct ice_fltr_mgmt_list_entry *v_list_itr;
3278 struct ice_fltr_info *new_fltr, *cur_fltr;
3279 enum ice_sw_lkup_type lkup_type;
3280 u16 vsi_list_id = 0, vsi_handle;
3281 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3282 enum ice_status status = ICE_SUCCESS;
3284 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3285 return ICE_ERR_PARAM;
3287 f_entry->fltr_info.fwd_id.hw_vsi_id =
3288 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3289 new_fltr = &f_entry->fltr_info;
3291 /* VLAN ID should only be 12 bits */
3292 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
3293 return ICE_ERR_PARAM;
3295 if (new_fltr->src_id != ICE_SRC_ID_VSI)
3296 return ICE_ERR_PARAM;
3298 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
3299 lkup_type = new_fltr->lkup_type;
3300 vsi_handle = new_fltr->vsi_handle;
3301 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3302 ice_acquire_lock(rule_lock);
3303 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
3305 struct ice_vsi_list_map_info *map_info = NULL;
3307 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
3308 /* All VLAN pruning rules use a VSI list. Check if
3309 * there is already a VSI list containing VSI that we
3310 * want to add. If found, use the same vsi_list_id for
3311 * this new VLAN rule or else create a new list.
3313 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
3317 status = ice_create_vsi_list_rule(hw,
3325 /* Convert the action to forwarding to a VSI list. */
3326 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3327 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
3330 status = ice_create_pkt_fwd_rule(hw, f_entry);
3332 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
3335 status = ICE_ERR_DOES_NOT_EXIST;
3338 /* reuse VSI list for new rule and increment ref_cnt */
3340 v_list_itr->vsi_list_info = map_info;
3341 map_info->ref_cnt++;
3343 v_list_itr->vsi_list_info =
3344 ice_create_vsi_list_map(hw, &vsi_handle,
3348 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
3349 /* Update existing VSI list to add new VSI ID only if it used
3352 cur_fltr = &v_list_itr->fltr_info;
3353 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
3356 /* If VLAN rule exists and VSI list being used by this rule is
3357 * referenced by more than 1 VLAN rule. Then create a new VSI
3358 * list appending previous VSI with new VSI and update existing
3359 * VLAN rule to point to new VSI list ID
3361 struct ice_fltr_info tmp_fltr;
3362 u16 vsi_handle_arr[2];
3365 /* Current implementation only supports reusing VSI list with
3366 * one VSI count. We should never hit below condition
3368 if (v_list_itr->vsi_count > 1 &&
3369 v_list_itr->vsi_list_info->ref_cnt > 1) {
3370 ice_debug(hw, ICE_DBG_SW,
3371 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
3372 status = ICE_ERR_CFG;
3377 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3380 /* A rule already exists with the new VSI being added */
3381 if (cur_handle == vsi_handle) {
3382 status = ICE_ERR_ALREADY_EXISTS;
3386 vsi_handle_arr[0] = cur_handle;
3387 vsi_handle_arr[1] = vsi_handle;
3388 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3389 &vsi_list_id, lkup_type);
3393 tmp_fltr = v_list_itr->fltr_info;
3394 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3395 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3396 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3397 /* Update the previous switch rule to a new VSI list which
3398 * includes current VSI that is requested
3400 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3404 /* before overriding VSI list map info. decrement ref_cnt of
3407 v_list_itr->vsi_list_info->ref_cnt--;
3409 /* now update to newly created list */
3410 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3411 v_list_itr->vsi_list_info =
3412 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3414 v_list_itr->vsi_count++;
3418 ice_release_lock(rule_lock);
3423 * ice_add_vlan - Add VLAN based filter rule
3424 * @hw: pointer to the hardware structure
3425 * @v_list: list of VLAN entries and forwarding information
/* Validates every entry in v_list is a VLAN lookup, forces the Tx flag on
 * each, and adds it via ice_add_vlan_internal(); stops and returns the first
 * per-entry failure status.
 * NOTE(review): several lines (null-check condition, loop body braces,
 * final success return) are elided in this extraction — verify upstream.
 */
3428 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3430 struct ice_fltr_list_entry *v_list_itr;
/* Elided guard: bail out on invalid hw/v_list parameters */
3433 return ICE_ERR_PARAM;
3435 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3437 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3438 return ICE_ERR_PARAM;
/* VLAN pruning rules are programmed as Tx-direction filters */
3439 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3440 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3441 if (v_list_itr->status)
3442 return v_list_itr->status;
3448 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3449 * @hw: pointer to the hardware structure
3450 * @mv_list: list of MAC and VLAN filters
3452 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3453 * pruning bits enabled, then it is the responsibility of the caller to make
3454 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3455 * VLAN won't be received on that VSI otherwise.
/* Each entry must be of lookup type ICE_SW_LKUP_MAC_VLAN; entries are added
 * as Tx filters through ice_add_rule_internal(). First failure aborts. */
3458 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3460 struct ice_fltr_list_entry *mv_list_itr;
3462 if (!mv_list || !hw)
3463 return ICE_ERR_PARAM;
3465 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3467 enum ice_sw_lkup_type l_type =
3468 mv_list_itr->fltr_info.lkup_type;
3470 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3471 return ICE_ERR_PARAM;
/* MAC+VLAN rules are programmed in the Tx direction */
3472 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3473 mv_list_itr->status =
3474 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3476 if (mv_list_itr->status)
3477 return mv_list_itr->status;
3483 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3484 * @hw: pointer to the hardware structure
3485 * @em_list: list of ether type MAC filter, MAC is optional
3487 * This function requires the caller to populate the entries in
3488 * the filter list with the necessary fields (including flags to
3489 * indicate Tx or Rx rules).
/* Accepts lookup types ETHERTYPE and ETHERTYPE_MAC only; unlike the VLAN
 * helpers above, the direction flag is NOT overwritten here — the caller's
 * Tx/Rx flag is preserved and passed to ice_add_rule_internal(). */
3492 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3494 struct ice_fltr_list_entry *em_list_itr;
3496 if (!em_list || !hw)
3497 return ICE_ERR_PARAM;
3499 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3501 enum ice_sw_lkup_type l_type =
3502 em_list_itr->fltr_info.lkup_type;
3504 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3505 l_type != ICE_SW_LKUP_ETHERTYPE)
3506 return ICE_ERR_PARAM;
3508 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3510 if (em_list_itr->status)
3511 return em_list_itr->status;
3517 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3518 * @hw: pointer to the hardware structure
3519 * @em_list: list of ethertype or ethertype MAC entries
/* Mirror of ice_add_eth_mac(): removes each ETHERTYPE/ETHERTYPE_MAC entry via
 * ice_remove_rule_internal(). Uses the SAFE iterator because removal may
 * unlink entries while walking. First failure aborts the walk. */
3522 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3524 struct ice_fltr_list_entry *em_list_itr, *tmp;
3526 if (!em_list || !hw)
3527 return ICE_ERR_PARAM;
3529 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3531 enum ice_sw_lkup_type l_type =
3532 em_list_itr->fltr_info.lkup_type;
3534 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3535 l_type != ICE_SW_LKUP_ETHERTYPE)
3536 return ICE_ERR_PARAM;
3538 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3540 if (em_list_itr->status)
3541 return em_list_itr->status;
3547 * ice_rem_sw_rule_info
3548 * @hw: pointer to the hardware structure
3549 * @rule_head: pointer to the switch list structure that we want to delete
/* Frees every ice_fltr_mgmt_list_entry on rule_head. Software bookkeeping
 * only — no admin-queue call is made here, so hardware rules are untouched. */
3552 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3554 if (!LIST_EMPTY(rule_head)) {
3555 struct ice_fltr_mgmt_list_entry *entry;
3556 struct ice_fltr_mgmt_list_entry *tmp;
3558 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3559 ice_fltr_mgmt_list_entry, list_entry) {
3560 LIST_DEL(&entry->list_entry);
3561 ice_free(hw, entry);
3567 * ice_rem_adv_rule_info
3568 * @hw: pointer to the hardware structure
3569 * @rule_head: pointer to the switch list structure that we want to delete
/* Advanced-rule variant of ice_rem_sw_rule_info(): each entry owns a
 * separately-allocated lkups array, which must be freed before the entry
 * itself to avoid leaking it. */
3572 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3574 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3575 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3577 if (LIST_EMPTY(rule_head))
/* Elided: early return when the list is already empty */
3580 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3581 ice_adv_fltr_mgmt_list_entry, list_entry) {
3582 LIST_DEL(&lst_itr->list_entry);
/* Free the lookup array first, then the owning entry */
3583 ice_free(hw, lst_itr->lkups);
3584 ice_free(hw, lst_itr);
3589 * ice_rem_all_sw_rules_info
3590 * @hw: pointer to the hardware structure
/* Walks every recipe slot and releases its software filter-rule list,
 * dispatching to the advanced-rule cleanup when the recipe is marked
 * adv_rule, else to the basic cleanup. */
3592 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3594 struct ice_switch_info *sw = hw->switch_info;
3597 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3598 struct LIST_HEAD_TYPE *rule_head;
3600 rule_head = &sw->recp_list[i].filt_rules;
3601 if (!sw->recp_list[i].adv_rule)
3602 ice_rem_sw_rule_info(hw, rule_head);
/* Elided else branch pairs with the adv-rule cleanup below */
3604 ice_rem_adv_rule_info(hw, rule_head);
3609 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3610 * @pi: pointer to the port_info structure
3611 * @vsi_handle: VSI handle to set as default
3612 * @set: true to add the above mentioned switch rule, false to remove it
3613 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3615 * add filter rule to set/unset given VSI as default VSI for the switch
3616 * (represented by swid)
3619 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3622 struct ice_aqc_sw_rules_elem *s_rule;
3623 struct ice_fltr_info f_info;
3624 struct ice_hw *hw = pi->hw;
3625 enum ice_adminq_opc opcode;
3626 enum ice_status status;
3630 if (!ice_is_vsi_valid(hw, vsi_handle))
3631 return ICE_ERR_PARAM;
3632 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add-rule buffers carry the dummy ethernet header; remove-rule buffers
 * only need the rule header, hence the two sizes. */
3634 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3635 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3636 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
/* Elided: allocation-failure check guarding this return */
3638 return ICE_ERR_NO_MEMORY;
3640 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3642 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3643 f_info.flag = direction;
3644 f_info.fltr_act = ICE_FWD_TO_VSI;
3645 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rules match on the physical port; Tx default rules match on
 * the VSI itself. On removal (elided branches) the cached rule ID stored
 * in port_info is reused so the right hardware rule is deleted. */
3647 if (f_info.flag & ICE_FLTR_RX) {
3648 f_info.src = pi->lport;
3649 f_info.src_id = ICE_SRC_ID_LPORT;
3651 f_info.fltr_rule_id =
3652 pi->dflt_rx_vsi_rule_id;
3653 } else if (f_info.flag & ICE_FLTR_TX) {
3654 f_info.src_id = ICE_SRC_ID_VSI;
3655 f_info.src = hw_vsi_id;
3657 f_info.fltr_rule_id =
3658 pi->dflt_tx_vsi_rule_id;
3662 opcode = ice_aqc_opc_add_sw_rules;
3664 opcode = ice_aqc_opc_remove_sw_rules;
3666 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3668 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3669 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* Elided: goto-out on AQ failure / non-Tx-Rx flag */
3672 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
/* On successful add: cache the HW VSI number and rule index per direction */
3674 if (f_info.flag & ICE_FLTR_TX) {
3675 pi->dflt_tx_vsi_num = hw_vsi_id;
3676 pi->dflt_tx_vsi_rule_id = index;
3677 } else if (f_info.flag & ICE_FLTR_RX) {
3678 pi->dflt_rx_vsi_num = hw_vsi_id;
3679 pi->dflt_rx_vsi_rule_id = index;
/* On successful remove (elided else): invalidate the cached state */
3682 if (f_info.flag & ICE_FLTR_TX) {
3683 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3684 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3685 } else if (f_info.flag & ICE_FLTR_RX) {
3686 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3687 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
/* Single exit point: release the AQ buffer on all paths */
3692 ice_free(hw, s_rule);
3697 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3698 * @hw: pointer to the hardware structure
3699 * @recp_id: lookup type for which the specified rule needs to be searched
3700 * @f_info: rule information
3702 * Helper function to search for a unicast rule entry - this is to be used
3703 * to remove unicast MAC filter that is not shared with other VSIs on the
3706 * Returns pointer to entry storing the rule if found
/* Matches on lookup data, HW VSI id AND direction flag — stricter than the
 * generic rule lookup, so a rule owned by a different VSI is not returned.
 * Caller must hold the recipe's filt_rule_lock. */
3708 static struct ice_fltr_mgmt_list_entry *
3709 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3710 struct ice_fltr_info *f_info)
3712 struct ice_switch_info *sw = hw->switch_info;
3713 struct ice_fltr_mgmt_list_entry *list_itr;
3714 struct LIST_HEAD_TYPE *list_head;
3716 list_head = &sw->recp_list[recp_id].filt_rules;
3717 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3719 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3720 sizeof(f_info->l_data)) &&
3721 f_info->fwd_id.hw_vsi_id ==
3722 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3723 f_info->flag == list_itr->fltr_info.flag)
/* Elided: return list_itr on match; NULL after the loop */
3730 * ice_remove_mac - remove a MAC address based filter rule
3731 * @hw: pointer to the hardware structure
3732 * @m_list: list of MAC addresses and forwarding information
3734 * This function removes either a MAC filter rule or a specific VSI from a
3735 * VSI list for a multicast MAC address.
3737 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3738 * ice_add_mac. Caller should be aware that this call will only work if all
3739 * the entries passed into m_list were added previously. It will not attempt to
3740 * do a partial remove of entries that were found.
3743 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3745 struct ice_fltr_list_entry *list_itr, *tmp;
3746 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
/* Elided guard: invalid m_list/hw parameters */
3749 return ICE_ERR_PARAM;
3751 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3752 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3754 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3755 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3758 if (l_type != ICE_SW_LKUP_MAC)
3759 return ICE_ERR_PARAM;
3761 vsi_handle = list_itr->fltr_info.vsi_handle;
3762 if (!ice_is_vsi_valid(hw, vsi_handle))
3763 return ICE_ERR_PARAM;
3765 list_itr->fltr_info.fwd_id.hw_vsi_id =
3766 ice_get_hw_vsi_num(hw, vsi_handle);
3767 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3768 /* Don't remove the unicast address that belongs to
3769 * another VSI on the switch, since it is not being
3770 * shared — only remove a rule this exact VSI/flag owns
3771 */
3772 ice_acquire_lock(rule_lock);
3773 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3774 &list_itr->fltr_info)) {
3775 ice_release_lock(rule_lock);
3776 return ICE_ERR_DOES_NOT_EXIST;
/* Lock is held only around the ownership lookup, not the removal */
3778 ice_release_lock(rule_lock);
3780 list_itr->status = ice_remove_rule_internal(hw,
3783 if (list_itr->status)
3784 return list_itr->status;
3790 * ice_remove_vlan - Remove VLAN based filter rule
3791 * @hw: pointer to the hardware structure
3792 * @v_list: list of VLAN entries and forwarding information
/* Removes each ICE_SW_LKUP_VLAN entry via ice_remove_rule_internal();
 * any other lookup type in the list is rejected with ICE_ERR_PARAM.
 * First per-entry failure aborts the walk. */
3795 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3797 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* Elided guard: invalid v_list/hw parameters */
3800 return ICE_ERR_PARAM;
3802 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3804 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3806 if (l_type != ICE_SW_LKUP_VLAN)
3807 return ICE_ERR_PARAM;
3808 v_list_itr->status = ice_remove_rule_internal(hw,
3811 if (v_list_itr->status)
3812 return v_list_itr->status;
3818 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3819 * @hw: pointer to the hardware structure
3820 * @v_list: list of MAC VLAN entries and forwarding information
/* MAC+VLAN counterpart of ice_remove_vlan(): each entry must be of lookup
 * type ICE_SW_LKUP_MAC_VLAN and is removed via ice_remove_rule_internal(). */
3823 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3825 struct ice_fltr_list_entry *v_list_itr, *tmp;
/* Elided guard: invalid v_list/hw parameters */
3828 return ICE_ERR_PARAM;
3830 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3832 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3834 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3835 return ICE_ERR_PARAM;
3836 v_list_itr->status =
3837 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3839 if (v_list_itr->status)
3840 return v_list_itr->status;
3846 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3847 * @fm_entry: filter entry to inspect
3848 * @vsi_handle: VSI handle to compare with filter info
/* True when the rule forwards directly to this VSI, or forwards to a VSI
 * list whose bitmap includes this VSI handle. */
3851 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3853 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3854 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3855 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3856 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3861 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3862 * @hw: pointer to the hardware structure
3863 * @vsi_handle: VSI handle to remove filters from
3864 * @vsi_list_head: pointer to the list to add entry to
3865 * @fi: pointer to fltr_info of filter entry to copy & add
3867 * Helper function, used when creating a list of filters to remove from
3868 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3869 * original filter entry, with the exception of fltr_info.fltr_act and
3870 * fltr_info.fwd_id fields. These are set such that later logic can
3871 * extract which VSI to remove the fltr from, and pass on that information.
3873 static enum ice_status
3874 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3875 struct LIST_HEAD_TYPE *vsi_list_head,
3876 struct ice_fltr_info *fi)
3878 struct ice_fltr_list_entry *tmp;
3880 /* this memory is freed up in the caller function
3881 * once filters for this VSI are removed
3882 */
3883 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
/* Elided: allocation-failure check guarding this return */
3885 return ICE_ERR_NO_MEMORY;
3887 tmp->fltr_info = *fi;
3889 /* Overwrite these fields to indicate which VSI to remove filter from,
3890 * so find and remove logic can extract the information from the
3891 * list entries. Note that original entries will still have proper
3892 * values, this copy exists only on the transient remove list.
3893 */
3894 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3895 tmp->fltr_info.vsi_handle = vsi_handle;
3896 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3898 LIST_ADD(&tmp->list_entry, vsi_list_head);
3904 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3905 * @hw: pointer to the hardware structure
3906 * @vsi_handle: VSI handle to remove filters from
3907 * @lkup_list_head: pointer to the list that has certain lookup type filters
3908 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3910 * Locates all filters in lkup_list_head that are used by the given VSI,
3911 * and adds COPIES of those entries to vsi_list_head (intended to be used
3912 * to remove the listed filters).
3913 * Note that this means all entries in vsi_list_head must be explicitly
3914 * deallocated by the caller when done with list.
3916 static enum ice_status
3917 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3918 struct LIST_HEAD_TYPE *lkup_list_head,
3919 struct LIST_HEAD_TYPE *vsi_list_head)
3921 struct ice_fltr_mgmt_list_entry *fm_entry;
3922 enum ice_status status = ICE_SUCCESS;
3924 /* check to make sure VSI ID is valid and within boundary */
3925 if (!ice_is_vsi_valid(hw, vsi_handle))
3926 return ICE_ERR_PARAM;
3928 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3929 ice_fltr_mgmt_list_entry, list_entry) {
3930 struct ice_fltr_info *fi;
3932 fi = &fm_entry->fltr_info;
/* Skip rules not referenced by this VSI (directly or via a VSI list) */
3933 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3936 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3945 * ice_determine_promisc_mask
3946 * @fi: filter info to parse
3948 * Helper function to determine which ICE_PROMISC_ mask corresponds
3949 * to given filter into.
/* Classifies the filter's DA as broadcast / multicast / unicast and its
 * direction (flag == ICE_FLTR_TX selects the *_TX bits), and also sets a
 * VLAN promisc bit in an elided branch keyed off the VLAN id. */
3951 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3953 u16 vid = fi->l_data.mac_vlan.vlan_id;
3954 u8 *macaddr = fi->l_data.mac.mac_addr;
3955 bool is_tx_fltr = false;
3956 u8 promisc_mask = 0;
3958 if (fi->flag == ICE_FLTR_TX)
/* Elided: is_tx_fltr = true */
3961 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3962 promisc_mask |= is_tx_fltr ?
3963 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3964 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3965 promisc_mask |= is_tx_fltr ?
3966 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3967 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3968 promisc_mask |= is_tx_fltr ?
3969 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
/* Elided: `if (vid)` guards the VLAN promisc bits below */
3971 promisc_mask |= is_tx_fltr ?
3972 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3974 return promisc_mask;
3978 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3979 * @hw: pointer to the hardware structure
3980 * @vsi_handle: VSI handle to retrieve info from
3981 * @promisc_mask: pointer to mask to be filled in
3982 * @vid: VLAN ID of promisc VLAN VSI
/* Accumulates ICE_PROMISC_* bits into *promisc_mask by scanning the
 * ICE_SW_LKUP_PROMISC recipe's rule list under its lock. */
3985 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3988 struct ice_switch_info *sw = hw->switch_info;
3989 struct ice_fltr_mgmt_list_entry *itr;
3990 struct LIST_HEAD_TYPE *rule_head;
3991 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3993 if (!ice_is_vsi_valid(hw, vsi_handle))
3994 return ICE_ERR_PARAM;
/* Elided: *promisc_mask and *vid are zero-initialized here */
3998 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3999 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4001 ice_acquire_lock(rule_lock);
4002 LIST_FOR_EACH_ENTRY(itr, rule_head,
4003 ice_fltr_mgmt_list_entry, list_entry) {
4004 /* Continue if this filter doesn't apply to this VSI or the
4005 * VSI ID is not in the VSI map for this filter
4006 */
4007 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4010 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4012 ice_release_lock(rule_lock);
4018 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4019 * @hw: pointer to the hardware structure
4020 * @vsi_handle: VSI handle to retrieve info from
4021 * @promisc_mask: pointer to mask to be filled in
4022 * @vid: VLAN ID of promisc VLAN VSI
/* Same shape as ice_get_vsi_promisc() but scans the PROMISC_VLAN recipe. */
4025 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4028 struct ice_switch_info *sw = hw->switch_info;
4029 struct ice_fltr_mgmt_list_entry *itr;
4030 struct LIST_HEAD_TYPE *rule_head;
4031 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4033 if (!ice_is_vsi_valid(hw, vsi_handle))
4034 return ICE_ERR_PARAM;
/* Elided: *promisc_mask and *vid are zero-initialized here */
4038 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4039 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
4041 ice_acquire_lock(rule_lock);
4042 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
4044 /* Continue if this filter doesn't apply to this VSI or the
4045 * VSI ID is not in the VSI map for this filter
4046 */
4047 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4050 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4052 ice_release_lock(rule_lock);
4058 * ice_remove_promisc - Remove promisc based filter rules
4059 * @hw: pointer to the hardware structure
4060 * @recp_id: recipe ID for which the rule needs to removed
4061 * @v_list: list of promisc entries
/* Removes each entry in v_list for the given promisc recipe; unlike the
 * other remove helpers this one performs no lookup-type validation — the
 * caller builds the list from the matching recipe. */
4063 static enum ice_status
4064 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
4065 struct LIST_HEAD_TYPE *v_list)
4067 struct ice_fltr_list_entry *v_list_itr, *tmp;
4069 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4071 v_list_itr->status =
4072 ice_remove_rule_internal(hw, recp_id, v_list_itr);
4073 if (v_list_itr->status)
4074 return v_list_itr->status;
4080 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4081 * @hw: pointer to the hardware structure
4082 * @vsi_handle: VSI handle to clear mode
4083 * @promisc_mask: mask of promiscuous config bits to clear
4084 * @vid: VLAN ID to clear VLAN promiscuous
4087 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4090 struct ice_switch_info *sw = hw->switch_info;
4091 struct ice_fltr_list_entry *fm_entry, *tmp;
4092 struct LIST_HEAD_TYPE remove_list_head;
4093 struct ice_fltr_mgmt_list_entry *itr;
4094 struct LIST_HEAD_TYPE *rule_head;
4095 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4096 enum ice_status status = ICE_SUCCESS;
4099 if (!ice_is_vsi_valid(hw, vsi_handle))
4100 return ICE_ERR_PARAM;
/* VLAN promisc bits live under a different recipe than plain promisc */
4102 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4103 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4105 recipe_id = ICE_SW_LKUP_PROMISC;
4107 rule_head = &sw->recp_list[recipe_id].filt_rules;
4108 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4110 INIT_LIST_HEAD(&remove_list_head);
/* Phase 1 (under lock): collect COPIES of matching rules to remove */
4112 ice_acquire_lock(rule_lock);
4113 LIST_FOR_EACH_ENTRY(itr, rule_head,
4114 ice_fltr_mgmt_list_entry, list_entry) {
4115 struct ice_fltr_info *fltr_info;
4116 u8 fltr_promisc_mask = 0;
4118 if (!ice_vsi_uses_fltr(itr, vsi_handle))
/* Elided: continue */
4120 fltr_info = &itr->fltr_info;
4122 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4123 vid != fltr_info->l_data.mac_vlan.vlan_id)
/* Elided: continue — only clear rules for the requested VLAN */
4126 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4128 /* Skip if filter is not completely specified by given mask */
4129 if (fltr_promisc_mask & ~promisc_mask)
/* Elided: continue */
4132 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
/* Elided: on failure, release lock and bail to cleanup */
4136 ice_release_lock(rule_lock);
4137 goto free_fltr_list;
4140 ice_release_lock(rule_lock);
/* Phase 2 (lock dropped): actually remove the collected rules */
4142 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
/* Cleanup: free the transient copies regardless of status */
4145 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4146 ice_fltr_list_entry, list_entry) {
4147 LIST_DEL(&fm_entry->list_entry);
4148 ice_free(hw, fm_entry);
4155 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4156 * @hw: pointer to the hardware structure
4157 * @vsi_handle: VSI handle to configure
4158 * @promisc_mask: mask of promiscuous config bits
4159 * @vid: VLAN ID to set VLAN promiscuous
/* One switch rule is installed per direction/packet-type bit in
 * promisc_mask; the loop below consumes one bit per iteration. */
4162 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4164 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4165 struct ice_fltr_list_entry f_list_entry;
4166 struct ice_fltr_info new_fltr;
4167 enum ice_status status = ICE_SUCCESS;
4173 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4175 if (!ice_is_vsi_valid(hw, vsi_handle))
4176 return ICE_ERR_PARAM;
4177 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4179 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
4181 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4182 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4183 new_fltr.l_data.mac_vlan.vlan_id = vid;
4184 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
/* Elided else: plain promisc recipe when no VLAN bits requested */
4186 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4187 recipe_id = ICE_SW_LKUP_PROMISC;
4190 /* Separate filters must be set for each direction/packet type
4191 * combination, so we will loop over the mask value, store the
4192 * individual type, and clear it out in the input mask as it
4193 * is processed
4194 */
4195 while (promisc_mask) {
/* Elided: per-iteration locals (pkt_type, is_tx_fltr, mac_addr) */
4201 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4202 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4203 pkt_type = UCAST_FLTR;
4204 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4205 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4206 pkt_type = UCAST_FLTR;
/* Elided: is_tx_fltr = true for each *_TX branch */
4208 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4209 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4210 pkt_type = MCAST_FLTR;
4211 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4212 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4213 pkt_type = MCAST_FLTR;
4215 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4216 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4217 pkt_type = BCAST_FLTR;
4218 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4219 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4220 pkt_type = BCAST_FLTR;
4224 /* Check for VLAN promiscuous flag */
4225 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4226 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4227 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4228 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
/* Elided: is_tx_fltr = true */
4232 /* Set filter DA based on packet type */
4233 mac_addr = new_fltr.l_data.mac.mac_addr;
4234 if (pkt_type == BCAST_FLTR) {
4235 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
4236 } else if (pkt_type == MCAST_FLTR ||
4237 pkt_type == UCAST_FLTR) {
4238 /* Use the dummy ether header DA */
4239 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
4240 ICE_NONDMA_TO_NONDMA);
4241 if (pkt_type == MCAST_FLTR)
4242 mac_addr[0] |= 0x1; /* Set multicast bit */
4245 /* Need to reset this to zero for all iterations */
/* Elided: new_fltr.flag = 0; Tx filters source from the VSI,
 * Rx filters from the physical port */
4248 new_fltr.flag |= ICE_FLTR_TX;
4249 new_fltr.src = hw_vsi_id;
/* Elided else: Rx direction */
4251 new_fltr.flag |= ICE_FLTR_RX;
4252 new_fltr.src = hw->port_info->lport;
4255 new_fltr.fltr_act = ICE_FWD_TO_VSI;
4256 new_fltr.vsi_handle = vsi_handle;
4257 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4258 f_list_entry.fltr_info = new_fltr;
4260 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4261 if (status != ICE_SUCCESS)
4262 goto set_promisc_exit;
4270 * ice_set_vlan_vsi_promisc
4271 * @hw: pointer to the hardware structure
4272 * @vsi_handle: VSI handle to configure
4273 * @promisc_mask: mask of promiscuous config bits
4274 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4276 * Configure VSI with all associated VLANs to given promiscuous mode(s)
/* Snapshots (under the VLAN recipe lock) the VLANs this VSI uses, then —
 * with the lock dropped — sets or clears promisc mode per VLAN. The
 * transient snapshot entries are freed in the cleanup loop. */
4279 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4280 bool rm_vlan_promisc)
4282 struct ice_switch_info *sw = hw->switch_info;
4283 struct ice_fltr_list_entry *list_itr, *tmp;
4284 struct LIST_HEAD_TYPE vsi_list_head;
4285 struct LIST_HEAD_TYPE *vlan_head;
4286 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
4287 enum ice_status status;
4290 INIT_LIST_HEAD(&vsi_list_head);
4291 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4292 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4293 ice_acquire_lock(vlan_lock);
4294 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4296 ice_release_lock(vlan_lock);
/* Elided: on snapshot failure, skip straight to cleanup */
4298 goto free_fltr_list;
4300 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
4302 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4303 if (rm_vlan_promisc)
4304 status = ice_clear_vsi_promisc(hw, vsi_handle,
4305 promisc_mask, vlan_id);
/* Elided else pairs with the set path below */
4307 status = ice_set_vsi_promisc(hw, vsi_handle,
4308 promisc_mask, vlan_id);
/* Elided: break out of the loop on failure */
4314 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
4315 ice_fltr_list_entry, list_entry) {
4316 LIST_DEL(&list_itr->list_entry);
4317 ice_free(hw, list_itr);
4323 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4324 * @hw: pointer to the hardware structure
4325 * @vsi_handle: VSI handle to remove filters from
4326 * @lkup: switch rule filter lookup type
/* Collects copies of this VSI's rules for one lookup type (under lock),
 * dispatches to the matching type-specific remove helper (lock dropped),
 * then frees the transient copies. */
4329 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4330 enum ice_sw_lkup_type lkup)
4332 struct ice_switch_info *sw = hw->switch_info;
4333 struct ice_fltr_list_entry *fm_entry;
4334 struct LIST_HEAD_TYPE remove_list_head;
4335 struct LIST_HEAD_TYPE *rule_head;
4336 struct ice_fltr_list_entry *tmp;
4337 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4338 enum ice_status status;
4340 INIT_LIST_HEAD(&remove_list_head);
4341 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4342 rule_head = &sw->recp_list[lkup].filt_rules;
4343 ice_acquire_lock(rule_lock);
4344 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4346 ice_release_lock(rule_lock);
/* Elided: bail to cleanup on collection failure; switch (lkup) { */
4351 case ICE_SW_LKUP_MAC:
4352 ice_remove_mac(hw, &remove_list_head);
/* Elided break statements follow each case below */
4354 case ICE_SW_LKUP_VLAN:
4355 ice_remove_vlan(hw, &remove_list_head);
4357 case ICE_SW_LKUP_PROMISC:
4358 case ICE_SW_LKUP_PROMISC_VLAN:
4359 ice_remove_promisc(hw, lkup, &remove_list_head);
4361 case ICE_SW_LKUP_MAC_VLAN:
4362 ice_remove_mac_vlan(hw, &remove_list_head);
4364 case ICE_SW_LKUP_ETHERTYPE:
4365 case ICE_SW_LKUP_ETHERTYPE_MAC:
4366 ice_remove_eth_mac(hw, &remove_list_head);
4368 case ICE_SW_LKUP_DFLT:
4369 ice_debug(hw, ICE_DBG_SW,
4370 "Remove filters for this lookup type hasn't been implemented yet\n");
4372 case ICE_SW_LKUP_LAST:
4373 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
/* Cleanup: free every transient copy collected above */
4377 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
4378 ice_fltr_list_entry, list_entry) {
4379 LIST_DEL(&fm_entry->list_entry);
4380 ice_free(hw, fm_entry);
4385 * ice_remove_vsi_fltr - Remove all filters for a VSI
4386 * @hw: pointer to the hardware structure
4387 * @vsi_handle: VSI handle to remove filters from
/* Iterates every supported lookup type and removes this VSI's rules for
 * each; per-type failures are not propagated (void helper). */
4389 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4391 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4393 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4394 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4395 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4396 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4397 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4398 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4399 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4400 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4404 * ice_alloc_res_cntr - allocating resource counter
4405 * @hw: pointer to the hardware structure
4406 * @type: type of resource
4407 * @alloc_shared: if set it is shared else dedicated
4408 * @num_items: number of entries requested for FD resource type
4409 * @counter_id: counter index returned by AQ call
4412 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4415 struct ice_aqc_alloc_free_res_elem *buf;
4416 enum ice_status status;
4419 /* Allocate resource */
4420 buf_len = sizeof(*buf);
4421 buf = (struct ice_aqc_alloc_free_res_elem *)
4422 ice_malloc(hw, buf_len);
/* Elided: allocation-failure check guarding this return */
4424 return ICE_ERR_NO_MEMORY;
4426 buf->num_elems = CPU_TO_LE16(num_items);
/* Pack the resource type into its field and OR in the shared flag */
4427 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4428 ICE_AQC_RES_TYPE_M) | alloc_shared);
4430 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4431 ice_aqc_opc_alloc_res, NULL);
/* Elided: goto cleanup on AQ failure */
4435 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
/* Elided: free buf, return status */
4443 * ice_free_res_cntr - free resource counter
4444 * @hw: pointer to the hardware structure
4445 * @type: type of resource
4446 * @alloc_shared: if set it is shared else dedicated
4447 * @num_items: number of entries to be freed for FD resource type
4448 * @counter_id: counter ID resource which needs to be freed
4451 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4454 struct ice_aqc_alloc_free_res_elem *buf;
4455 enum ice_status status;
/* Free resource via the same alloc/free AQ command, free opcode */
4459 buf_len = sizeof(*buf);
4460 buf = (struct ice_aqc_alloc_free_res_elem *)
4461 ice_malloc(hw, buf_len);
/* Elided: allocation-failure check guarding this return */
4463 return ICE_ERR_NO_MEMORY;
4465 buf->num_elems = CPU_TO_LE16(num_items);
4466 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4467 ICE_AQC_RES_TYPE_M) | alloc_shared);
/* Identify which counter to release */
4468 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4470 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4471 ice_aqc_opc_free_res, NULL);
/* Elided: status check guards the debug message; failure is logged only */
4473 ice_debug(hw, ICE_DBG_SW,
4474 "counter resource could not be freed\n");
/* Elided: free buf, return status */
4481 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4482 * @hw: pointer to the hardware structure
4483 * @counter_id: returns counter index
/* Thin wrapper: one dedicated VLAN counter via ice_alloc_res_cntr() */
4485 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4487 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4488 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4493 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4494 * @hw: pointer to the hardware structure
4495 * @counter_id: counter index to be freed
 *
 * Convenience wrapper: frees a single, dedicated VLAN counter.
4497 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4499 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4500 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4505 * ice_alloc_res_lg_act - add large action resource
4506 * @hw: pointer to the hardware structure
4507 * @l_id: large action ID to fill it in
4508 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table (large action) entry from FW sized for
 * @num_acts actions and returns its index in @l_id.
4510 static enum ice_status
4511 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4513 struct ice_aqc_alloc_free_res_elem *sw_buf;
4514 enum ice_status status;
 /* Reject zero or more than ICE_MAX_LG_ACT actions up front */
4517 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4518 return ICE_ERR_PARAM;
4520 /* Allocate resource for large action */
4521 buf_len = sizeof(*sw_buf);
4522 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4523 ice_malloc(hw, buf_len);
4525 return ICE_ERR_NO_MEMORY;
4527 sw_buf->num_elems = CPU_TO_LE16(1);
4529 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4530 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
4531 * If num_acts is greater than 2, then use
4532 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4533 * The num_acts cannot exceed 4. This was ensured at the
4534 * beginning of the function.
4537 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4538 else if (num_acts == 2)
4539 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4541 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4543 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4544 ice_aqc_opc_alloc_res, NULL);
 /* On success, hand the FW-assigned large-action index to the caller */
4546 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4548 ice_free(hw, sw_buf);
4553 * ice_add_mac_with_sw_marker - add filter with sw marker
4554 * @hw: pointer to the hardware structure
4555 * @f_info: filter info structure containing the MAC filter information
4556 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or finds) a forward-to-VSI MAC filter and attaches a 3-action
 * large action that stamps @sw_marker into the Rx descriptor.
4559 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4562 struct ice_switch_info *sw = hw->switch_info;
4563 struct ice_fltr_mgmt_list_entry *m_entry;
4564 struct ice_fltr_list_entry fl_info;
4565 struct LIST_HEAD_TYPE l_head;
4566 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4567 enum ice_status ret;
 /* Parameter validation: only fwd-to-VSI MAC lookups with a valid
  * marker and VSI handle are supported.
  */
4571 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4572 return ICE_ERR_PARAM;
4574 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4575 return ICE_ERR_PARAM;
4577 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4578 return ICE_ERR_PARAM;
4580 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4581 return ICE_ERR_PARAM;
4582 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4584 /* Add filter if it doesn't exist so then the adding of large
4585 * action always results in update
4588 INIT_LIST_HEAD(&l_head);
4589 fl_info.fltr_info = *f_info;
4590 LIST_ADD(&fl_info.list_entry, &l_head);
 /* Remember whether the filter pre-existed so cleanup can undo only
  * what this call created.
  */
4592 entry_exists = false;
4593 ret = ice_add_mac(hw, &l_head);
4594 if (ret == ICE_ERR_ALREADY_EXISTS)
4595 entry_exists = true;
4599 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4600 ice_acquire_lock(rule_lock);
4601 /* Get the book keeping entry for the filter */
4602 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4606 /* If counter action was enabled for this rule then don't enable
4607 * sw marker large action
4609 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4610 ret = ICE_ERR_PARAM;
4614 /* if same marker was added before */
4615 if (m_entry->sw_marker_id == sw_marker) {
4616 ret = ICE_ERR_ALREADY_EXISTS;
4620 /* Allocate a hardware table entry to hold large act. Three actions
4621 * for marker based large action
4623 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4627 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4630 /* Update the switch rule to add the marker action */
4631 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4633 ice_release_lock(rule_lock);
 /* NOTE(review): error/exit labels between these two unlocks are not
  * visible in this excerpt - verify goto flow against upstream.
  */
4638 ice_release_lock(rule_lock);
4639 /* only remove entry if it did not exist previously */
4641 ret = ice_remove_mac(hw, &l_head);
4647 * ice_add_mac_with_counter - add filter with counter enabled
4648 * @hw: pointer to the hardware structure
4649 * @f_info: pointer to filter info structure containing the MAC filter
 *
 * Adds (or finds) a forward-to-VSI MAC filter, allocates a VLAN counter
 * and attaches a 2-action large action that counts hits on the rule.
4653 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4655 struct ice_switch_info *sw = hw->switch_info;
4656 struct ice_fltr_mgmt_list_entry *m_entry;
4657 struct ice_fltr_list_entry fl_info;
4658 struct LIST_HEAD_TYPE l_head;
4659 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4660 enum ice_status ret;
 /* Parameter validation: only fwd-to-VSI MAC lookups on a valid VSI */
4665 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4666 return ICE_ERR_PARAM;
4668 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4669 return ICE_ERR_PARAM;
4671 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4672 return ICE_ERR_PARAM;
4673 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
 /* Track pre-existence so cleanup removes only what this call added */
4675 entry_exist = false;
4677 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4679 /* Add filter if it doesn't exist so then the adding of large
4680 * action always results in update
4682 INIT_LIST_HEAD(&l_head);
4684 fl_info.fltr_info = *f_info;
4685 LIST_ADD(&fl_info.list_entry, &l_head);
4687 ret = ice_add_mac(hw, &l_head);
4688 if (ret == ICE_ERR_ALREADY_EXISTS)
4693 ice_acquire_lock(rule_lock);
 /* Fetch the bookkeeping entry for this filter under the rule lock */
4694 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4696 ret = ICE_ERR_BAD_PTR;
4700 /* Don't enable counter for a filter for which sw marker was enabled */
4701 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4702 ret = ICE_ERR_PARAM;
4706 /* If a counter was already enabled then don't need to add again */
4707 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4708 ret = ICE_ERR_ALREADY_EXISTS;
4712 /* Allocate a hardware table entry to VLAN counter */
4713 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4717 /* Allocate a hardware table entry to hold large act. Two actions for
4718 * counter based large action
4720 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4724 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4727 /* Update the switch rule to add the counter action */
4728 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4730 ice_release_lock(rule_lock);
 /* NOTE(review): error/exit labels between these two unlocks are not
  * visible in this excerpt - verify goto flow against upstream.
  */
4735 ice_release_lock(rule_lock);
4736 /* only remove entry if it did not exist previously */
4738 ret = ice_remove_mac(hw, &l_head);
4743 /* This is mapping table entry that maps every word within a given protocol
4744 * structure to the real byte offset as per the specification of that
4746 * for example dst address is 3 words in ethertype header and corresponding
4747 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4748 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4749 * matching entry describing its field. This needs to be updated if new
4750 * structure is added to that union.
 */
/* Each entry lists the byte offset of every 16-bit word of the header;
 * tunnel headers (VXLAN/GENEVE/VXLAN-GPE/GTP) start at byte 8, i.e. past
 * the UDP header that precedes them.
 */
4752 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4753 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4754 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4755 { ICE_ETYPE_OL, { 0 } },
4756 { ICE_VLAN_OFOS, { 0, 2 } },
4757 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4758 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4759 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4760 26, 28, 30, 32, 34, 36, 38 } },
4761 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4762 26, 28, 30, 32, 34, 36, 38 } },
4763 { ICE_TCP_IL, { 0, 2 } },
4764 { ICE_UDP_OF, { 0, 2 } },
4765 { ICE_UDP_ILOS, { 0, 2 } },
4766 { ICE_SCTP_IL, { 0, 2 } },
4767 { ICE_VXLAN, { 8, 10, 12, 14 } },
4768 { ICE_GENEVE, { 8, 10, 12, 14 } },
4769 { ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
4770 { ICE_NVGRE, { 0, 2, 4, 6 } },
4771 { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20 } },
4772 { ICE_PPPOE, { 0, 2, 4, 6 } },
4775 /* The following table describes preferred grouping of recipes.
4776 * If a recipe that needs to be programmed is a superset or matches one of the
4777 * following combinations, then the recipe needs to be chained as per the
 */
/* Maps each software protocol type to its hardware protocol ID. Note that
 * all UDP-based tunnels (VXLAN, GENEVE, VXLAN-GPE, GTP) share ICE_UDP_OF_HW.
 */
4781 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
4782 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4783 { ICE_MAC_IL, ICE_MAC_IL_HW },
4784 { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
4785 { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
4786 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4787 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4788 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4789 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4790 { ICE_TCP_IL, ICE_TCP_IL_HW },
4791 { ICE_UDP_OF, ICE_UDP_OF_HW },
4792 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4793 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4794 { ICE_VXLAN, ICE_UDP_OF_HW },
4795 { ICE_GENEVE, ICE_UDP_OF_HW },
4796 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4797 { ICE_NVGRE, ICE_GRE_OF_HW },
4798 { ICE_GTP, ICE_UDP_OF_HW },
4799 { ICE_PPPOE, ICE_PPPOE_HW },
4803 * ice_find_recp - find a recipe
4804 * @hw: pointer to the hardware structure
4805 * @lkup_exts: extension sequence to match
4807 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A recipe matches when it has the same number of valid words and each
 * requested (prot_id, off) pair appears somewhere in the recipe's words.
4809 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4811 bool refresh_required = true;
4812 struct ice_sw_recipe *recp;
4815 /* Walk through existing recipes to find a match */
4816 recp = hw->switch_info->recp_list;
4817 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4818 /* If recipe was not created for this ID, in SW bookkeeping,
4819 * check if FW has an entry for this recipe. If the FW has an
4820 * entry update it in our SW bookkeeping and continue with the
4823 if (!recp[i].recp_created)
4824 if (ice_get_recp_frm_fw(hw,
4825 hw->switch_info->recp_list, i,
 /* NOTE(review): the remaining args (likely &refresh_required)
  * and the continue path are not visible in this excerpt.
  */
4829 /* Skip inverse action recipes */
4830 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
4831 ICE_AQ_RECIPE_ACT_INV_ACT)
4834 /* if number of words we are looking for match */
4835 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4836 struct ice_fv_word *a = lkup_exts->fv_words;
4837 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
 /* O(p*q) containment check of requested words in recipe words */
4841 for (p = 0; p < lkup_exts->n_val_words; p++) {
4842 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4844 if (a[p].off == b[q].off &&
4845 a[p].prot_id == b[q].prot_id)
4846 /* Found the "p"th word in the
4851 /* After walking through all the words in the
4852 * "i"th recipe if "p"th word was not found then
4853 * this recipe is not what we are looking for.
4854 * So break out from this loop and try the next
4857 if (q >= recp[i].lkup_exts.n_val_words) {
4862 /* If for "i"th recipe the found was never set to false
4863 * then it means we found our match
4866 return i; /* Return the recipe ID */
4869 return ICE_MAX_NUM_RECIPES;
4873 * ice_prot_type_to_id - get protocol ID from protocol type
4874 * @type: protocol type
4875 * @id: pointer to variable that will receive the ID
4877 * Returns true if found, false otherwise
 *
 * Linear lookup of @type in ice_prot_id_tbl; @id is untouched on failure.
4879 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4883 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4884 if (ice_prot_id_tbl[i].type == type) {
4885 *id = ice_prot_id_tbl[i].protocol_id;
4892 * ice_fill_valid_words - count valid words
4893 * @rule: advanced rule with lookup information
4894 * @lkup_exts: byte offset extractions of the words that are valid
4896 * calculate valid words in a lookup rule using mask value
 *
 * Appends one (prot_id, off, mask) triple to @lkup_exts for every 16-bit
 * word of the rule's mask that is non-zero; returns the number of words
 * added (capped by ICE_MAX_CHAIN_WORDS).
4899 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4900 struct ice_prot_lkup_ext *lkup_exts)
 /* Bail out if the protocol type has no HW protocol ID mapping */
4906 if (!ice_prot_type_to_id(rule->type, &prot_id))
4909 word = lkup_exts->n_val_words;
 /* Scan the match mask one 16-bit word at a time */
4911 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4912 if (((u16 *)&rule->m_u)[j] &&
4913 (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
4914 /* No more space to accommodate */
4915 if (word >= ICE_MAX_CHAIN_WORDS)
4917 lkup_exts->fv_words[word].off =
4918 ice_prot_ext[rule->type].offs[j];
4919 lkup_exts->fv_words[word].prot_id =
4920 ice_prot_id_tbl[rule->type].protocol_id;
4921 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
 /* Return how many new words this rule contributed */
4925 ret_val = word - lkup_exts->n_val_words;
4926 lkup_exts->n_val_words = word;
4932 * ice_create_first_fit_recp_def - Create a recipe grouping
4933 * @hw: pointer to the hardware structure
4934 * @lkup_exts: an array of protocol header extractions
4935 * @rg_list: pointer to a list that stores new recipe groups
4936 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4938 * Using first fit algorithm, take all the words that are still not done
4939 * and start grouping them in 4-word groups. Each group makes up one
 *
 * Allocated group entries are linked onto @rg_list; caller owns and
 * frees them.
4942 static enum ice_status
4943 ice_create_first_fit_recp_def(struct ice_hw *hw,
4944 struct ice_prot_lkup_ext *lkup_exts,
4945 struct LIST_HEAD_TYPE *rg_list,
4948 struct ice_pref_recipe_group *grp = NULL;
4953 /* Walk through every word in the rule to check if it is not done. If so
4954 * then this word needs to be part of a new recipe.
4956 for (j = 0; j < lkup_exts->n_val_words; j++)
4957 if (!ice_is_bit_set(lkup_exts->done, j)) {
 /* Start a new group when none exists or current one is full
  * (ICE_NUM_WORDS_RECIPE words per group).
  */
4959 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4960 struct ice_recp_grp_entry *entry;
4962 entry = (struct ice_recp_grp_entry *)
4963 ice_malloc(hw, sizeof(*entry));
4965 return ICE_ERR_NO_MEMORY;
4966 LIST_ADD(&entry->l_entry, rg_list);
4967 grp = &entry->r_group;
 /* Copy the word's protocol/offset pair and mask into the group */
4971 grp->pairs[grp->n_val_pairs].prot_id =
4972 lkup_exts->fv_words[j].prot_id;
4973 grp->pairs[grp->n_val_pairs].off =
4974 lkup_exts->fv_words[j].off;
4975 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4983 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4984 * @hw: pointer to the hardware structure
4985 * @fv_list: field vector with the extraction sequence information
4986 * @rg_list: recipe groupings with protocol-offset pairs
4988 * Helper function to fill in the field vector indices for protocol-offset
4989 * pairs. These indexes are then ultimately programmed into a recipe.
4991 static enum ice_status
4992 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4993 struct LIST_HEAD_TYPE *rg_list)
4995 struct ice_sw_fv_list_entry *fv;
4996 struct ice_recp_grp_entry *rg;
4997 struct ice_fv_word *fv_ext;
 /* Nothing to resolve against if no field vectors were found */
4999 if (LIST_EMPTY(fv_list))
 /* Only the first field vector in the list is used for index lookup */
5002 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
5003 fv_ext = fv->fv_ptr->ew;
5005 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
5008 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
5009 struct ice_fv_word *pr;
5014 pr = &rg->r_group.pairs[i];
5015 mask = rg->r_group.mask[i];
 /* Scan the extraction sequence for this protocol/offset */
5017 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
5018 if (fv_ext[j].prot_id == pr->prot_id &&
5019 fv_ext[j].off == pr->off) {
5022 /* Store index of field vector */
5024 /* Mask is given by caller as big
5025 * endian, but sent to FW as little
 /* Byte-swap the 16-bit mask for FW consumption */
5028 rg->fv_mask[i] = mask << 8 | mask >> 8;
5032 /* Protocol/offset could not be found, caller gave an
5036 return ICE_ERR_PARAM;
5044 * ice_find_free_recp_res_idx - find free result indexes for recipe
5045 * @hw: pointer to hardware structure
5046 * @profiles: bitmap of profiles that will be associated with the new recipe
5047 * @free_idx: pointer to variable to receive the free index bitmap
5049 * The algorithm used here is:
5050 * 1. When creating a new recipe, create a set P which contains all
5051 * Profiles that will be associated with our new recipe
5053 * 2. For each Profile p in set P:
5054 * a. Add all recipes associated with Profile p into set R
5055 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
5056 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
5057 * i. Or just assume they all have the same possible indexes:
5059 * i.e., PossibleIndexes = 0x0000F00000000000
5061 * 3. For each Recipe r in set R:
5062 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
5063 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
5065 * FreeIndexes will contain the bits indicating the indexes free for use,
5066 * then the code needs to update the recipe[r].used_result_idx_bits to
5067 * indicate which indexes were selected for use by this recipe.
5070 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
5071 ice_bitmap_t *free_idx)
5073 ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5074 ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5075 ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
 /* Start from clean bitmaps before accumulating */
5079 ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
5080 ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
5081 ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
5082 ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
 /* Initially consider every result index possible */
5084 for (count = 0; count < ICE_MAX_FV_WORDS; count++)
5085 ice_set_bit(count, possible_idx);
5087 /* For each profile we are going to associate the recipe with, add the
5088 * recipes that are associated with that profile. This will give us
5089 * the set of recipes that our recipe may collide with. Also, determine
5090 * what possible result indexes are usable given this set of profiles.
5093 while (ICE_MAX_NUM_PROFILES >
5094 (bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
5095 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
5096 ICE_MAX_NUM_RECIPES);
5097 ice_and_bitmap(possible_idx, possible_idx,
5098 hw->switch_info->prof_res_bm[bit],
5103 /* For each recipe that our new recipe may collide with, determine
5104 * which indexes have been used.
5106 for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
5107 if (ice_is_bit_set(recipes, bit)) {
5108 ice_or_bitmap(used_idx, used_idx,
5109 hw->switch_info->recp_list[bit].res_idxs,
 /* free = possible XOR used (used is a subset of possible) */
5113 ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
5115 /* return number of free indexes */
5118 while (ICE_MAX_FV_WORDS >
5119 (bit = ice_find_next_bit(free_idx, ICE_MAX_FV_WORDS, bit))) {
5128 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
5129 * @hw: pointer to hardware structure
5130 * @rm: recipe management list entry
5131 * @match_tun: if field vector index for tunnel needs to be programmed
5132 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates recipe IDs from FW, builds the recipe buffer(s) from the
 * recipe groups in @rm->rg_list (adding a chaining recipe when more than
 * one group is needed), programs them via the add-recipe AQ command, and
 * mirrors the result into the SW recipe bookkeeping.
5134 static enum ice_status
5135 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
5136 bool match_tun, ice_bitmap_t *profiles)
5138 ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5139 struct ice_aqc_recipe_data_elem *tmp;
5140 struct ice_aqc_recipe_data_elem *buf;
5141 struct ice_recp_grp_entry *entry;
5142 enum ice_status status;
5148 /* When more than one recipe are required, another recipe is needed to
5149 * chain them together. Matching a tunnel metadata ID takes up one of
5150 * the match fields in the chaining recipe reducing the number of
5151 * chained recipes by one.
5153 /* check number of free result indices */
5154 ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
5155 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
5157 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
5158 free_res_idx, rm->n_grp_count);
5160 if (rm->n_grp_count > 1) {
 /* Chained recipes need one free result index per group */
5161 if (rm->n_grp_count > free_res_idx)
5162 return ICE_ERR_MAX_LIMIT;
 /* Scratch buffer used to read back existing recipes from FW */
5167 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
5168 ICE_MAX_NUM_RECIPES,
5171 return ICE_ERR_NO_MEMORY;
5173 buf = (struct ice_aqc_recipe_data_elem *)
5174 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
5176 status = ICE_ERR_NO_MEMORY;
5180 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
5181 recipe_count = ICE_MAX_NUM_RECIPES;
5182 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5184 if (status || recipe_count == 0)
5187 /* Allocate the recipe resources, and configure them according to the
5188 * match fields from protocol headers and extracted field vectors.
5190 chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5191 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5194 status = ice_alloc_recipe(hw, &entry->rid);
5198 /* Clear the result index of the located recipe, as this will be
5199 * updated, if needed, later in the recipe creation process.
5201 tmp[0].content.result_indx = 0;
5203 buf[recps] = tmp[0];
5204 buf[recps].recipe_indx = (u8)entry->rid;
5205 /* if the recipe is a non-root recipe RID should be programmed
5206 * as 0 for the rules to be applied correctly.
5208 buf[recps].content.rid = 0;
5209 ice_memset(&buf[recps].content.lkup_indx, 0,
5210 sizeof(buf[recps].content.lkup_indx),
5213 /* All recipes use look-up index 0 to match switch ID. */
5214 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5215 buf[recps].content.mask[0] =
5216 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5217 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5220 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5221 buf[recps].content.lkup_indx[i] = 0x80;
5222 buf[recps].content.mask[i] = 0;
 /* Fill in this group's actual FV indices and (swapped) masks,
  * offset by one because index 0 is the switch-ID match above.
  */
5225 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5226 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5227 buf[recps].content.mask[i + 1] =
5228 CPU_TO_LE16(entry->fv_mask[i]);
5231 if (rm->n_grp_count > 1) {
5232 /* Checks to see if there really is a valid result index
5235 if (chain_idx >= ICE_MAX_FV_WORDS) {
5236 ice_debug(hw, ICE_DBG_SW,
5237 "No chain index available\n");
5238 status = ICE_ERR_MAX_LIMIT;
 /* Publish this group's result into the chosen result index so
  * the chaining recipe can match on it; consume the index.
  */
5242 entry->chain_idx = chain_idx;
5243 buf[recps].content.result_indx =
5244 ICE_AQ_RECIPE_RESULT_EN |
5245 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5246 ICE_AQ_RECIPE_RESULT_DATA_M);
5247 ice_clear_bit(chain_idx, result_idx_bm);
5248 chain_idx = ice_find_first_bit(result_idx_bm,
5252 /* fill recipe dependencies */
5253 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
5254 ICE_MAX_NUM_RECIPES);
5255 ice_set_bit(buf[recps].recipe_indx,
5256 (ice_bitmap_t *)buf[recps].recipe_bitmap);
5257 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
 /* Single-group recipe: it is its own root */
5261 if (rm->n_grp_count == 1) {
5262 rm->root_rid = buf[0].recipe_indx;
5263 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
5264 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5265 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5266 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5267 sizeof(buf[0].recipe_bitmap),
5268 ICE_NONDMA_TO_NONDMA);
5270 status = ICE_ERR_BAD_PTR;
5273 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5274 * the recipe which is getting created if specified
5275 * by user. Usually any advanced switch filter, which results
5276 * into new extraction sequence, ended up creating a new recipe
5277 * of type ROOT and usually recipes are associated with profiles
5278 * Switch rule referring newly created recipe, needs to have
5279 * either/or 'fwd' or 'join' priority, otherwise switch rule
5280 * evaluation will not happen correctly. In other words, if
5281 * switch rule to be evaluated on priority basis, then recipe
5282 * needs to have priority, otherwise it will be evaluated last.
5284 buf[0].content.act_ctrl_fwd_priority = rm->priority;
5286 struct ice_recp_grp_entry *last_chain_entry;
5289 /* Allocate the last recipe that will chain the outcomes of the
5290 * other recipes together
5292 status = ice_alloc_recipe(hw, &rid);
5296 buf[recps].recipe_indx = (u8)rid;
5297 buf[recps].content.rid = (u8)rid;
5298 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5299 /* the new entry created should also be part of rg_list to
5300 * make sure we have complete recipe
5302 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
5303 sizeof(*last_chain_entry));
5304 if (!last_chain_entry) {
5305 status = ICE_ERR_NO_MEMORY;
5308 last_chain_entry->rid = rid;
5309 ice_memset(&buf[recps].content.lkup_indx, 0,
5310 sizeof(buf[recps].content.lkup_indx),
5312 /* All recipes use look-up index 0 to match switch ID. */
5313 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5314 buf[recps].content.mask[0] =
5315 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
5316 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5317 buf[recps].content.lkup_indx[i] =
5318 ICE_AQ_RECIPE_LKUP_IGNORE;
5319 buf[recps].content.mask[i] = 0;
5323 /* update r_bitmap with the recp that is used for chaining */
5324 ice_set_bit(rid, rm->r_bitmap);
5325 /* this is the recipe that chains all the other recipes so it
5326 * should not have a chaining ID to indicate the same
5328 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
 /* Point the chaining recipe's lookups at each group's result index */
5329 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
5331 last_chain_entry->fv_idx[i] = entry->chain_idx;
5332 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5333 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
5334 ice_set_bit(entry->rid, rm->r_bitmap);
5336 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
5337 if (sizeof(buf[recps].recipe_bitmap) >=
5338 sizeof(rm->r_bitmap)) {
5339 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5340 sizeof(buf[recps].recipe_bitmap),
5341 ICE_NONDMA_TO_NONDMA);
5343 status = ICE_ERR_BAD_PTR;
5346 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5348 /* To differentiate among different UDP tunnels, a meta data ID
 /* NOTE(review): the "if (match_tun)" guard for this tunnel-flag
  * lookup is not visible in this excerpt - verify upstream.
  */
5352 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
5353 buf[recps].content.mask[i] =
5354 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
5358 rm->root_rid = (u8)rid;
 /* Program the assembled recipe buffer(s) under the change lock */
5360 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5364 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5365 ice_release_change_lock(hw);
5369 /* Every recipe that just got created add it to the recipe
5372 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
5373 struct ice_switch_info *sw = hw->switch_info;
5374 bool is_root, idx_found = false;
5375 struct ice_sw_recipe *recp;
5376 u16 idx, buf_idx = 0;
5378 /* find buffer index for copying some data */
5379 for (idx = 0; idx < rm->n_grp_count; idx++)
5380 if (buf[idx].recipe_indx == entry->rid) {
5386 status = ICE_ERR_OUT_OF_RANGE;
 /* Mirror the programmed recipe into SW bookkeeping */
5390 recp = &sw->recp_list[entry->rid];
5391 is_root = (rm->root_rid == entry->rid);
5392 recp->is_root = is_root;
5394 recp->root_rid = entry->rid;
5395 recp->big_recp = (is_root && rm->n_grp_count > 1);
5397 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
5398 entry->r_group.n_val_pairs *
5399 sizeof(struct ice_fv_word),
5400 ICE_NONDMA_TO_NONDMA);
5402 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5403 sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
5405 /* Copy non-result fv index values and masks to recipe. This
5406 * call will also update the result recipe bitmask.
5408 ice_collect_result_idx(&buf[buf_idx], recp);
5410 /* for non-root recipes, also copy to the root, this allows
5411 * easier matching of a complete chained recipe
5414 ice_collect_result_idx(&buf[buf_idx],
5415 &sw->recp_list[rm->root_rid]);
5417 recp->n_ext_words = entry->r_group.n_val_pairs;
5418 recp->chain_idx = entry->chain_idx;
5419 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5420 recp->n_grp_count = rm->n_grp_count;
5421 recp->tun_type = rm->tun_type;
5422 recp->recp_created = true;
5437 * ice_create_recipe_group - creates recipe group
5438 * @hw: pointer to hardware structure
5439 * @rm: recipe management list entry
5440 * @lkup_exts: lookup elements
 *
 * Groups the lookup words into recipe groups (first-fit) on @rm->rg_list
 * and copies the word/mask arrays into @rm for later programming.
5442 static enum ice_status
5443 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5444 struct ice_prot_lkup_ext *lkup_exts)
5446 enum ice_status status;
5449 rm->n_grp_count = 0;
5451 /* Create recipes for words that are marked not done by packing them
5454 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5455 &rm->rg_list, &recp_count);
5457 rm->n_grp_count += recp_count;
5458 rm->n_ext_words = lkup_exts->n_val_words;
5459 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5460 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5461 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5462 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
5469 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5470 * @hw: pointer to hardware structure
5471 * @lkups: lookup elements or match criteria for the advanced recipe, one
5472 * structure per protocol header
5473 * @lkups_cnt: number of protocols
5474 * @bm: bitmap of field vectors to consider
5475 * @fv_list: pointer to a list that holds the returned field vectors
5477 static enum ice_status
5478 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5479 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
5481 enum ice_status status;
 /* Temporary array of HW protocol IDs, one per lookup element */
5485 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5487 return ICE_ERR_NO_MEMORY;
5489 for (i = 0; i < lkups_cnt; i++)
5490 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5491 status = ICE_ERR_CFG;
5495 /* Find field vectors that include all specified protocol types */
5496 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
 /* Scratch array is always released, on success and failure alike */
5499 ice_free(hw, prot_ids);
5504 * ice_add_special_words - Add words that are not protocols, such as metadata
5505 * @rinfo: other information regarding the rule e.g. priority and action info
5506 * @lkup_exts: lookup word structure
 *
 * For tunneled rules, appends a metadata word matching the tunnel flag so
 * the recipe can distinguish tunneled from non-tunneled packets.
5508 static enum ice_status
5509 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5510 struct ice_prot_lkup_ext *lkup_exts)
5512 /* If this is a tunneled packet, then add recipe index to match the
5513 * tunnel bit in the packet metadata flags.
5515 if (rinfo->tun_type != ICE_NON_TUN) {
5516 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5517 u8 word = lkup_exts->n_val_words++;
5519 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5520 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID *
5522 lkup_exts->field_mask[word] = ICE_TUN_FLAG_MASK;
 /* No room left for the metadata word */
5524 return ICE_ERR_MAX_LIMIT;
5531 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5532 * @hw: pointer to hardware structure
5533 * @rinfo: other information regarding the rule e.g. priority and action info
5534 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Maps the rule's tunnel type to a profile class, then fetches the bitmap
 * of field vectors belonging to that class into @bm.
5537 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5540 enum ice_prof_type type;
5542 switch (rinfo->tun_type) {
5544 type = ICE_PROF_NON_TUN;
5546 case ICE_ALL_TUNNELS:
5547 type = ICE_PROF_TUN_ALL;
5549 case ICE_SW_TUN_VXLAN_GPE:
5550 case ICE_SW_TUN_GENEVE:
5551 case ICE_SW_TUN_VXLAN:
5552 case ICE_SW_TUN_UDP:
5553 case ICE_SW_TUN_GTP:
 /* All UDP-encapsulated tunnels share the same profile class */
5554 type = ICE_PROF_TUN_UDP;
5556 case ICE_SW_TUN_NVGRE:
5557 type = ICE_PROF_TUN_GRE;
5559 case ICE_SW_TUN_PPPOE:
5560 type = ICE_PROF_TUN_PPPOE;
5562 case ICE_SW_TUN_AND_NON_TUN:
 /* Mixed / default: consider every profile */
5564 type = ICE_PROF_ALL;
5568 ice_get_sw_fv_bitmap(hw, type, bm);
5572 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5573 * @hw: pointer to hardware structure
5574 * @lkups: lookup elements or match criteria for the advanced recipe, one
5575 * structure per protocol header
5576 * @lkups_cnt: number of protocols
5577 * @rinfo: other information regarding the rule e.g. priority and action info
5578 * @rid: return the recipe ID of the recipe created
/* Build (or reuse) the advanced recipe described by @lkups/@rinfo and return
 * its recipe ID through @rid. Flow visible here: validate lookups, collect
 * compatible field-vector profiles, group match words into recipe groups,
 * reuse an existing recipe when ice_find_recp() matches, otherwise program a
 * new one and cross-link the profile<->recipe bitmap arrays.
 * NOTE(review): the embedded original line numbers are non-contiguous; braces,
 * breaks and some statements are missing from this extraction -- confirm
 * control flow against the canonical ice_switch.c before editing.
 */
5580 static enum ice_status
5581 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5582 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5584 ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
5585 ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5586 struct ice_prot_lkup_ext *lkup_exts;
5587 struct ice_recp_grp_entry *r_entry;
5588 struct ice_sw_fv_list_entry *fvit;
5589 struct ice_recp_grp_entry *r_tmp;
5590 struct ice_sw_fv_list_entry *tmp;
5591 enum ice_status status = ICE_SUCCESS;
5592 struct ice_sw_recipe *rm;
5593 bool match_tun = false;
5597 return ICE_ERR_PARAM;
/* lkup_exts accumulates the valid (protocol, offset) words across all
 * lookups; freed at err_free_lkup_exts on every exit path seen below.
 */
5599 lkup_exts = (struct ice_prot_lkup_ext *)
5600 ice_malloc(hw, sizeof(*lkup_exts));
5602 return ICE_ERR_NO_MEMORY;
5604 /* Determine the number of words to be matched and if it exceeds a
5605 * recipe's restrictions
5607 for (i = 0; i < lkups_cnt; i++) {
5610 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5611 status = ICE_ERR_CFG;
5612 goto err_free_lkup_exts;
5615 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5617 status = ICE_ERR_CFG;
5618 goto err_free_lkup_exts;
5622 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5624 status = ICE_ERR_NO_MEMORY;
5625 goto err_free_lkup_exts;
5628 /* Get field vectors that contain fields extracted from all the protocol
5629 * headers being programmed.
5631 INIT_LIST_HEAD(&rm->fv_list);
5632 INIT_LIST_HEAD(&rm->rg_list);
5634 /* Get bitmap of field vectors (profiles) that are compatible with the
5635 * rule request; only these will be searched in the subsequent call to
5638 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5640 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
5644 /* Group match words into recipes using preferred recipe grouping
5647 status = ice_create_recipe_group(hw, rm, lkup_exts);
5651 /* There is only profile for UDP tunnels. So, it is necessary to use a
5652 * metadata ID flag to differentiate different tunnel types. A separate
5653 * recipe needs to be used for the metadata.
5655 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5656 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5657 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
/* presumably sets match_tun = true on the (missing) next line -- verify */
5660 /* set the recipe priority if specified */
5661 rm->priority = rinfo->priority ? rinfo->priority : 0;
5663 /* Find offsets from the field vector. Pick the first one for all the
5666 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5670 /* get bitmap of all profiles the recipe will be associated with */
5671 ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
5672 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5674 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5675 ice_set_bit((u16)fvit->profile_id, profiles);
5678 /* Create any special protocol/offset pairs, such as looking at tunnel
5679 * bits by extracting metadata
5681 status = ice_add_special_words(rinfo, lkup_exts);
5683 goto err_free_lkup_exts;
5685 /* Look for a recipe which matches our requested fv / mask list */
5686 *rid = ice_find_recp(hw, lkup_exts);
5687 if (*rid < ICE_MAX_NUM_RECIPES)
5688 /* Success if found a recipe that match the existing criteria */
5691 /* Recipe we need does not exist, add a recipe */
5692 status = ice_add_sw_recipe(hw, rm, match_tun, profiles);
5696 /* Associate all the recipes created with all the profiles in the
5697 * common field vector.
5699 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5701 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Merge the FW's current recipe-to-profile map with our new recipes,
 * then write it back under the change lock.
 */
5704 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5705 (u8 *)r_bitmap, NULL);
5709 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
5710 ICE_MAX_NUM_RECIPES);
5711 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5715 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5718 ice_release_change_lock(hw);
5723 /* Update profile to recipe bitmap array */
5724 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
5725 ICE_MAX_NUM_RECIPES);
5727 /* Update recipe to profile bitmap array */
5728 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
5729 if (ice_is_bit_set(r_bitmap, j))
5730 ice_set_bit((u16)fvit->profile_id,
5731 recipe_to_profile[j]);
5734 *rid = rm->root_rid;
5735 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5736 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: release recipe-group entries, fv list entries, root buffer
 * and the lookup-extension scratch structure.
 */
5738 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5739 ice_recp_grp_entry, l_entry) {
5740 LIST_DEL(&r_entry->l_entry);
5741 ice_free(hw, r_entry);
5744 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5746 LIST_DEL(&fvit->list_entry);
5751 ice_free(hw, rm->root_buf);
5756 ice_free(hw, lkup_exts);
5762 * ice_find_dummy_packet - find dummy packet by tunnel type
5764 * @lkups: lookup elements or match criteria for the advanced recipe, one
5765 * structure per protocol header
5766 * @lkups_cnt: number of protocols
5767 * @tun_type: tunnel type from the match criteria
5768 * @pkt: dummy packet to fill according to filter match criteria
5769 * @pkt_len: packet length of dummy packet
5770 * @offsets: pointer to receive the pointer to the offsets for the packet
/* Select the dummy packet template (and its protocol-offset table) that best
 * matches the caller's tunnel type and lookup list. Tunnel-specific templates
 * (GTP, PPPoE, GRE, VXLAN/GENEVE/UDP-tun) take precedence; otherwise the
 * choice is driven by the tcp/udp/ipv6/vlan flags gathered from @lkups.
 * NOTE(review): original line numbers are non-contiguous -- early returns,
 * flag assignments and else-branches are missing from this extraction.
 */
5773 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5774 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5776 const struct ice_dummy_pkt_offsets **offsets)
5778 bool tcp = false, udp = false, ipv6 = false, vlan = false;
5781 if (tun_type == ICE_SW_TUN_GTP) {
5782 *pkt = dummy_udp_gtp_packet;
5783 *pkt_len = sizeof(dummy_udp_gtp_packet);
5784 *offsets = dummy_udp_gtp_packet_offsets;
5787 if (tun_type == ICE_SW_TUN_PPPOE) {
5788 *pkt = dummy_pppoe_packet;
5789 *pkt_len = sizeof(dummy_pppoe_packet);
5790 *offsets = dummy_pppoe_packet_offsets;
/* Scan lookups once to learn which L3/L4/VLAN headers the rule matches on. */
5793 for (i = 0; i < lkups_cnt; i++) {
5794 if (lkups[i].type == ICE_UDP_ILOS)
5796 else if (lkups[i].type == ICE_TCP_IL)
5798 else if (lkups[i].type == ICE_IPV6_OFOS)
5800 else if (lkups[i].type == ICE_VLAN_OFOS)
5804 if (tun_type == ICE_ALL_TUNNELS) {
5805 *pkt = dummy_gre_udp_packet;
5806 *pkt_len = sizeof(dummy_gre_udp_packet);
5807 *offsets = dummy_gre_udp_packet_offsets;
5811 if (tun_type == ICE_SW_TUN_NVGRE) {
5813 *pkt = dummy_gre_tcp_packet;
5814 *pkt_len = sizeof(dummy_gre_tcp_packet);
5815 *offsets = dummy_gre_tcp_packet_offsets;
5819 *pkt = dummy_gre_udp_packet;
5820 *pkt_len = sizeof(dummy_gre_udp_packet);
5821 *offsets = dummy_gre_udp_packet_offsets;
5825 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5826 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5828 *pkt = dummy_udp_tun_tcp_packet;
5829 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5830 *offsets = dummy_udp_tun_tcp_packet_offsets;
5834 *pkt = dummy_udp_tun_udp_packet;
5835 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5836 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel selection below: udp / udp+ipv6 / ipv6 / tcp fallback, each
 * with a VLAN-tagged variant.
 */
5842 *pkt = dummy_vlan_udp_packet;
5843 *pkt_len = sizeof(dummy_vlan_udp_packet);
5844 *offsets = dummy_vlan_udp_packet_offsets;
5847 *pkt = dummy_udp_packet;
5848 *pkt_len = sizeof(dummy_udp_packet);
5849 *offsets = dummy_udp_packet_offsets;
5851 } else if (udp && ipv6) {
5853 *pkt = dummy_vlan_udp_ipv6_packet;
5854 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
5855 *offsets = dummy_vlan_udp_ipv6_packet_offsets;
5858 *pkt = dummy_udp_ipv6_packet;
5859 *pkt_len = sizeof(dummy_udp_ipv6_packet);
5860 *offsets = dummy_udp_ipv6_packet_offsets;
/* NOTE(review): "(tcp && ipv6) || ipv6" is logically just "ipv6"; the
 * redundant first term looks like leftover from an earlier condition.
 */
5862 } else if ((tcp && ipv6) || ipv6) {
5864 *pkt = dummy_vlan_tcp_ipv6_packet;
5865 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
5866 *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
5869 *pkt = dummy_tcp_ipv6_packet;
5870 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
5871 *offsets = dummy_tcp_ipv6_packet_offsets;
5876 *pkt = dummy_vlan_tcp_packet;
5877 *pkt_len = sizeof(dummy_vlan_tcp_packet);
5878 *offsets = dummy_vlan_tcp_packet_offsets;
5880 *pkt = dummy_tcp_packet;
5881 *pkt_len = sizeof(dummy_tcp_packet);
5882 *offsets = dummy_tcp_packet_offsets;
5887 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5889 * @lkups: lookup elements or match criteria for the advanced recipe, one
5890 * structure per protocol header
5891 * @lkups_cnt: number of protocols
5892 * @s_rule: stores rule information from the match criteria
5893 * @dummy_pkt: dummy packet to fill according to filter match criteria
5894 * @pkt_len: packet length of dummy packet
5895 * @offsets: offset info for the dummy packet
/* Copy the chosen dummy packet into @s_rule's lookup header buffer, then
 * overlay the caller's match values: for each lookup, locate its header via
 * @offsets and masked-merge h_u into the packet word by word (only bits set
 * in m_u are written). Finally record the header length in the rule.
 * NOTE(review): non-contiguous original numbering -- the case labels for the
 * switch and some braces/breaks are missing from this extraction.
 */
5897 static enum ice_status
5898 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5899 struct ice_aqc_sw_rules_elem *s_rule,
5900 const u8 *dummy_pkt, u16 pkt_len,
5901 const struct ice_dummy_pkt_offsets *offsets)
5906 /* Start with a packet with a pre-defined/dummy content. Then, fill
5907 * in the header values to be looked up or matched.
5909 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5911 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5913 for (i = 0; i < lkups_cnt; i++) {
5914 enum ice_protocol_type type;
5915 u16 offset = 0, len = 0, j;
5918 /* find the start of this layer; it should be found since this
5919 * was already checked when search for the dummy packet
5921 type = lkups[i].type;
5922 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5923 if (type == offsets[j].type) {
5924 offset = offsets[j].offset;
5929 /* this should never happen in a correct calling sequence */
5931 return ICE_ERR_PARAM;
/* Each lookup type maps to the byte length of its header struct; the
 * case labels themselves are absent from this extraction.
 */
5933 switch (lkups[i].type) {
5936 len = sizeof(struct ice_ether_hdr);
5939 len = sizeof(struct ice_ethtype_hdr);
5942 len = sizeof(struct ice_vlan_hdr);
5946 len = sizeof(struct ice_ipv4_hdr);
5950 len = sizeof(struct ice_ipv6_hdr);
5955 len = sizeof(struct ice_l4_hdr);
5958 len = sizeof(struct ice_sctp_hdr);
5961 len = sizeof(struct ice_nvgre);
5966 len = sizeof(struct ice_udp_tnl_hdr);
5970 len = sizeof(struct ice_udp_gtp_hdr);
5973 len = sizeof(struct ice_pppoe_hdr);
5976 return ICE_ERR_PARAM;
5979 /* the length should be a word multiple */
5980 if (len % ICE_BYTES_PER_WORD)
5983 /* We have the offset to the header start, the length, the
5984 * caller's header values and mask. Use this information to
5985 * copy the data into the dummy packet appropriately based on
5986 * the mask. Note that we need to only write the bits as
5987 * indicated by the mask to make sure we don't improperly write
5988 * over any significant packet data.
5990 for (j = 0; j < len / sizeof(u16); j++)
5991 if (((u16 *)&lkups[i].m_u)[j])
5992 ((u16 *)(pkt + offset))[j] =
5993 (((u16 *)(pkt + offset))[j] &
5994 ~((u16 *)&lkups[i].m_u)[j]) |
5995 (((u16 *)&lkups[i].h_u)[j] &
5996 ((u16 *)&lkups[i].m_u)[j]);
5999 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
6005 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
6006 * @hw: pointer to the hardware structure
6007 * @tun_type: tunnel type
6008 * @pkt: dummy packet to fill in
6009 * @offsets: offset info for the dummy packet
/* Patch the dummy packet's outer UDP destination port with the currently
 * open tunnel port for VXLAN-family or GENEVE tunnel types; other tunnel
 * types need no patching. The outer UDP header is found by scanning
 * @offsets for ICE_UDP_OF.
 * NOTE(review): non-contiguous original numbering -- breaks, the default
 * label's return and the final return are missing from this extraction.
 */
6011 static enum ice_status
6012 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
6013 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
6018 case ICE_SW_TUN_AND_NON_TUN:
6019 case ICE_SW_TUN_VXLAN_GPE:
6020 case ICE_SW_TUN_VXLAN:
6021 case ICE_SW_TUN_UDP:
6022 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
6026 case ICE_SW_TUN_GENEVE:
6027 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
6032 /* Nothing needs to be done for this tunnel type */
6036 /* Find the outer UDP protocol header and insert the port number */
6037 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
6038 if (offsets[i].type == ICE_UDP_OF) {
6039 struct ice_l4_hdr *hdr;
6042 offset = offsets[i].offset;
6043 hdr = (struct ice_l4_hdr *)&pkt[offset];
6044 hdr->dst_port = CPU_TO_BE16(open_port);
6054 * ice_find_adv_rule_entry - Search a rule entry
6055 * @hw: pointer to the hardware structure
6056 * @lkups: lookup elements or match criteria for the advanced recipe, one
6057 * structure per protocol header
6058 * @lkups_cnt: number of protocols
6059 * @recp_id: recipe ID for which we are finding the rule
6060 * @rinfo: other information regarding the rule e.g. priority and action info
6062 * Helper function to search for a given advance rule entry
6063 * Returns pointer to entry storing the rule if found
/* Walk the filter-rule list of recipe @recp_id and return the management
 * entry whose lookups (memcmp over each element) and rule info (sw_act.flag
 * and tun_type) match the caller's; the return statement and loop closing
 * lines are missing from this extraction.
 */
6065 static struct ice_adv_fltr_mgmt_list_entry *
6066 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6067 u16 lkups_cnt, u8 recp_id,
6068 struct ice_adv_rule_info *rinfo)
6070 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6071 struct ice_switch_info *sw = hw->switch_info;
6074 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
6075 ice_adv_fltr_mgmt_list_entry, list_entry) {
6076 bool lkups_matched = true;
/* Lookup count must match exactly before element-wise comparison. */
6078 if (lkups_cnt != list_itr->lkups_cnt)
6080 for (i = 0; i < list_itr->lkups_cnt; i++)
6081 if (memcmp(&list_itr->lkups[i], &lkups[i],
6083 lkups_matched = false;
6086 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
6087 rinfo->tun_type == list_itr->rule_info.tun_type &&
6095 * ice_adv_add_update_vsi_list
6096 * @hw: pointer to the hardware structure
6097 * @m_entry: pointer to current adv filter management list entry
6098 * @cur_fltr: filter information from the book keeping entry
6099 * @new_fltr: filter information with the new VSI to be added
6101 * Call AQ command to add or update previously created VSI list with new VSI.
6103 * Helper function to do book keeping associated with adding filter information
6104 * The algorithm to do the book keeping is described below:
6105 * When a VSI needs to subscribe to a given advanced filter
6106 * if only one VSI has been added till now
6107 * Allocate a new VSI list and add two VSIs
6108 * to this list using switch rule command
6109 * Update the previously created switch rule with the
6110 * newly created VSI list ID
6111 * if a VSI list was previously created
6112 * Add the new VSI to the previously created VSI list set
6113 * using the update switch rule command
/* Subscribe a new VSI to an existing advanced filter: when the entry holds a
 * single VSI, create a two-entry VSI list and repoint the switch rule at it;
 * when a VSI list already exists, just add the new VSI to that list. Queue /
 * queue-group / drop actions cannot be list-merged and return NOT_IMPL.
 * NOTE(review): non-contiguous original numbering -- some braces and early
 * returns are missing from this extraction.
 */
6115 static enum ice_status
6116 ice_adv_add_update_vsi_list(struct ice_hw *hw,
6117 struct ice_adv_fltr_mgmt_list_entry *m_entry,
6118 struct ice_adv_rule_info *cur_fltr,
6119 struct ice_adv_rule_info *new_fltr)
6121 enum ice_status status;
6122 u16 vsi_list_id = 0;
6124 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6125 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6126 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
6127 return ICE_ERR_NOT_IMPL;
6129 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
6130 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
6131 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6132 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
6133 return ICE_ERR_NOT_IMPL;
6135 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
6136 /* Only one entry existed in the mapping and it was not already
6137 * a part of a VSI list. So, create a VSI list with the old and
6140 struct ice_fltr_info tmp_fltr;
6141 u16 vsi_handle_arr[2];
6143 /* A rule already exists with the new VSI being added */
6144 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
6145 new_fltr->sw_act.fwd_id.hw_vsi_id)
6146 return ICE_ERR_ALREADY_EXISTS;
6148 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
6149 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
6150 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
6156 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
6157 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
6158 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
6159 /* Update the previous switch rule of "forward to VSI" to
6162 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book keeping: the entry now forwards to the new VSI list. */
6166 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
6167 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
6168 m_entry->vsi_list_info =
6169 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
6172 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
6174 if (!m_entry->vsi_list_info)
6177 /* A rule already exists with the new VSI being added */
6178 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
6181 /* Update the previously created VSI list set with
6182 * the new VSI ID passed in
6184 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
6186 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
6188 ice_aqc_opc_update_sw_rules,
6190 /* update VSI list mapping info with new VSI ID */
6192 ice_set_bit(vsi_handle,
6193 m_entry->vsi_list_info->vsi_map);
6196 m_entry->vsi_count++;
6201 * ice_add_adv_rule - helper function to create an advanced switch rule
6202 * @hw: pointer to the hardware structure
6203 * @lkups: information on the words that needs to be looked up. All words
6204 * together makes one recipe
6205 * @lkups_cnt: num of entries in the lkups array
6206 * @rinfo: other information related to the rule that needs to be programmed
6207 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
6208 * ignored in case of error.
6210 * This function can program only 1 rule at a time. The lkups is used to
6211 * describe all the words that form the "lookup" portion of the recipe.
6212 * These words can span multiple protocols. Callers to this function need to
6213 * pass in a list of protocol headers with lookup information along and mask
6214 * that determines which words are valid from the given protocol header.
6215 * rinfo describes other information related to this rule such as forwarding
6216 * IDs, priority of this rule, etc.
/* Program one advanced switch rule: validate inputs, create/reuse the recipe
 * via ice_add_adv_recipe(), reuse an existing rule entry through the VSI-list
 * path when one matches, otherwise build an s_rule (action bits + dummy
 * packet), send it with ice_aq_sw_rules(), and add a management entry to the
 * recipe's filter list. On failure, err_ice_add_adv_rule frees the partially
 * built entry and the rule buffer.
 * NOTE(review): non-contiguous original numbering -- braces, breaks, several
 * declarations (act, word_cnt, q_rgn, j) and early returns are missing from
 * this extraction; verify against the canonical ice_switch.c.
 */
6219 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6220 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
6221 struct ice_rule_query_data *added_entry)
6223 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
6224 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
6225 const struct ice_dummy_pkt_offsets *pkt_offsets;
6226 struct ice_aqc_sw_rules_elem *s_rule = NULL;
6227 struct LIST_HEAD_TYPE *rule_head;
6228 struct ice_switch_info *sw;
6229 enum ice_status status;
6230 const u8 *pkt = NULL;
6235 /* Initialize profile to result index bitmap */
6236 if (!hw->switch_info->prof_res_bm_init) {
6237 hw->switch_info->prof_res_bm_init = 1;
6238 ice_init_prof_result_bm(hw);
6242 return ICE_ERR_PARAM;
6244 /* get # of words we need to match */
6246 for (i = 0; i < lkups_cnt; i++) {
6249 ptr = (u16 *)&lkups[i].m_u;
6250 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
6254 if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
6255 return ICE_ERR_PARAM;
6257 /* make sure that we can locate a dummy packet */
6258 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
6261 status = ICE_ERR_PARAM;
6262 goto err_ice_add_adv_rule;
6265 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
6266 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
6267 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
6268 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
6271 vsi_handle = rinfo->sw_act.vsi_handle;
6272 if (!ice_is_vsi_valid(hw, vsi_handle))
6273 return ICE_ERR_PARAM;
6275 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
6276 rinfo->sw_act.fwd_id.hw_vsi_id =
6277 ice_get_hw_vsi_num(hw, vsi_handle);
6278 if (rinfo->sw_act.flag & ICE_FLTR_TX)
6279 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
6281 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
6284 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6286 /* we have to add VSI to VSI_LIST and increment vsi_count.
6287 * Also Update VSI list so that we can change forwarding rule
6288 * if the rule already exists, we will check if it exists with
6289 * same vsi_id, if not then add it to the VSI list if it already
6290 * exists if not then create a VSI list and add the existing VSI
6291 * ID and the new VSI ID to the list
6292 * We will add that VSI to the list
6294 status = ice_adv_add_update_vsi_list(hw, m_entry,
6295 &m_entry->rule_info,
6298 added_entry->rid = rid;
6299 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
6300 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* No matching rule exists: build a fresh s_rule buffer sized for the
 * dummy packet.
 */
6304 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
6305 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
6307 return ICE_ERR_NO_MEMORY;
6308 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
6309 switch (rinfo->sw_act.fltr_act) {
6310 case ICE_FWD_TO_VSI:
6311 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
6312 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
6313 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
6316 act |= ICE_SINGLE_ACT_TO_Q;
6317 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6318 ICE_SINGLE_ACT_Q_INDEX_M;
6320 case ICE_FWD_TO_QGRP:
6321 q_rgn = rinfo->sw_act.qgrp_size > 0 ?
6322 (u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
6323 act |= ICE_SINGLE_ACT_TO_Q;
6324 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
6325 ICE_SINGLE_ACT_Q_INDEX_M;
6326 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
6327 ICE_SINGLE_ACT_Q_REGION_M;
6329 case ICE_DROP_PACKET:
6330 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
6331 ICE_SINGLE_ACT_VALID_BIT;
6334 status = ICE_ERR_CFG;
6335 goto err_ice_add_adv_rule;
6338 /* set the rule LOOKUP type based on caller specified 'RX'
6339 * instead of hardcoding it to be either LOOKUP_TX/RX
6341 * for 'RX' set the source to be the port number
6342 * for 'TX' set the source to be the source HW VSI number (determined
6346 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
6347 s_rule->pdata.lkup_tx_rx.src =
6348 CPU_TO_LE16(hw->port_info->lport);
6350 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
6351 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
6354 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
6355 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
6357 status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
6358 pkt_len, pkt_offsets);
6360 goto err_ice_add_adv_rule;
6362 if (rinfo->tun_type != ICE_NON_TUN &&
6363 rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
6364 status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
6365 s_rule->pdata.lkup_tx_rx.hdr,
6368 goto err_ice_add_adv_rule;
6371 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6372 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
6375 goto err_ice_add_adv_rule;
/* Rule is in hardware; duplicate the lookups and record the entry for
 * later lookup/removal.
 */
6376 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
6377 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
6379 status = ICE_ERR_NO_MEMORY;
6380 goto err_ice_add_adv_rule;
6383 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
6384 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
6385 ICE_NONDMA_TO_NONDMA);
6386 if (!adv_fltr->lkups) {
6387 status = ICE_ERR_NO_MEMORY;
6388 goto err_ice_add_adv_rule;
6391 adv_fltr->lkups_cnt = lkups_cnt;
6392 adv_fltr->rule_info = *rinfo;
6393 adv_fltr->rule_info.fltr_rule_id =
6394 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6395 sw = hw->switch_info;
6396 sw->recp_list[rid].adv_rule = true;
6397 rule_head = &sw->recp_list[rid].filt_rules;
6399 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
6400 struct ice_fltr_info tmp_fltr;
6402 tmp_fltr.fltr_rule_id =
6403 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
6404 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6405 tmp_fltr.fwd_id.hw_vsi_id =
6406 ice_get_hw_vsi_num(hw, vsi_handle);
6407 tmp_fltr.vsi_handle = vsi_handle;
6408 /* Update the previous switch rule of "forward to VSI" to
6411 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6413 goto err_ice_add_adv_rule;
6414 adv_fltr->vsi_count = 1;
6417 /* Add rule entry to book keeping list */
6418 LIST_ADD(&adv_fltr->list_entry, rule_head);
6420 added_entry->rid = rid;
6421 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
6422 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
6424 err_ice_add_adv_rule:
6425 if (status && adv_fltr) {
6426 ice_free(hw, adv_fltr->lkups);
6427 ice_free(hw, adv_fltr);
6430 ice_free(hw, s_rule);
6436 * ice_adv_rem_update_vsi_list
6437 * @hw: pointer to the hardware structure
6438 * @vsi_handle: VSI handle of the VSI to remove
6439 * @fm_list: filter management entry for which the VSI list management needs to
/* Unsubscribe @vsi_handle from an advanced filter that forwards to a VSI
 * list. When exactly one VSI remains afterwards, the rule is rewritten to a
 * plain "forward to VSI" and the now-unused VSI list is removed and its map
 * entry freed.
 * NOTE(review): non-contiguous original numbering -- some declarations and
 * error-return lines are missing from this extraction.
 */
6442 static enum ice_status
6443 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
6444 struct ice_adv_fltr_mgmt_list_entry *fm_list)
6446 struct ice_vsi_list_map_info *vsi_list_info;
6447 enum ice_sw_lkup_type lkup_type;
6448 enum ice_status status;
6451 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
6452 fm_list->vsi_count == 0)
6453 return ICE_ERR_PARAM;
6455 /* A rule with the VSI being removed does not exist */
6456 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
6457 return ICE_ERR_DOES_NOT_EXIST;
6459 lkup_type = ICE_SW_LKUP_LAST;
6460 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
6461 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
6462 ice_aqc_opc_update_sw_rules,
6467 fm_list->vsi_count--;
6468 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
6469 vsi_list_info = fm_list->vsi_list_info;
6470 if (fm_list->vsi_count == 1) {
6471 struct ice_fltr_info tmp_fltr;
/* Only one VSI left: collapse the list rule back to FWD_TO_VSI. */
6474 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
6476 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
6477 return ICE_ERR_OUT_OF_RANGE;
6479 /* Make sure VSI list is empty before removing it below */
6480 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
6482 ice_aqc_opc_update_sw_rules,
6486 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
6487 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
6488 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
6489 tmp_fltr.fwd_id.hw_vsi_id =
6490 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6491 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
6492 ice_get_hw_vsi_num(hw, rem_vsi_handle);
6494 /* Update the previous switch rule of "MAC forward to VSI" to
6495 * "MAC fwd to VSI list"
6497 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
6499 ice_debug(hw, ICE_DBG_SW,
6500 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
6501 tmp_fltr.fwd_id.hw_vsi_id, status);
6505 /* Remove the VSI list since it is no longer used */
6506 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
6508 ice_debug(hw, ICE_DBG_SW,
6509 "Failed to remove VSI list %d, error %d\n",
6510 vsi_list_id, status);
6514 LIST_DEL(&vsi_list_info->list_entry);
6515 ice_free(hw, vsi_list_info);
6516 fm_list->vsi_list_info = NULL;
6523 * ice_rem_adv_rule - removes existing advanced switch rule
6524 * @hw: pointer to the hardware structure
6525 * @lkups: information on the words that needs to be looked up. All words
6526 * together makes one recipe
6527 * @lkups_cnt: num of entries in the lkups array
6528 * @rinfo: pointer to the rule information for the rule to be removed
6530 * This function can be used to remove 1 rule at a time. The lkups is
6531 * used to describe all the words that forms the "lookup" portion of the
6532 * rule. These words can span multiple protocols. Callers to this function
6533 * need to pass in a list of protocol headers with lookup information along
6534 * and mask that determines which words are valid from the given protocol
6535 * header. rinfo describes other information related to this rule such as
6536 * forwarding IDs, priority of this rule, etc.
/* Remove one advanced switch rule: rebuild the lookup extensions from @lkups,
 * find the owning recipe and the matching management entry, detach the VSI
 * (via the VSI-list helper when the rule forwards to a list), and when no VSI
 * references remain, delete the rule from hardware and free the entry.
 * NOTE(review): non-contiguous original numbering -- several lines (e.g. the
 * remove_rule assignments and some braces) are missing from this extraction.
 */
6539 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6540 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6542 struct ice_adv_fltr_mgmt_list_entry *list_elem;
6543 struct ice_prot_lkup_ext lkup_exts;
6544 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
6545 enum ice_status status = ICE_SUCCESS;
6546 bool remove_rule = false;
6547 u16 i, rid, vsi_handle;
6549 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
6550 for (i = 0; i < lkups_cnt; i++) {
6553 if (lkups[i].type >= ICE_PROTOCOL_LAST)
6556 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6561 /* Create any special protocol/offset pairs, such as looking at tunnel
6562 * bits by extracting metadata
6564 status = ice_add_special_words(rinfo, &lkup_exts);
6568 rid = ice_find_recp(hw, &lkup_exts);
6569 /* If did not find a recipe that match the existing criteria */
6570 if (rid == ICE_MAX_NUM_RECIPES)
6571 return ICE_ERR_PARAM;
6573 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6574 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6575 /* the rule is already removed */
6578 ice_acquire_lock(rule_lock);
6579 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6581 } else if (list_elem->vsi_count > 1) {
6582 list_elem->vsi_list_info->ref_cnt--;
6583 remove_rule = false;
6584 vsi_handle = rinfo->sw_act.vsi_handle;
6585 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6587 vsi_handle = rinfo->sw_act.vsi_handle;
6588 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6590 ice_release_lock(rule_lock);
6593 if (list_elem->vsi_count == 0)
6596 ice_release_lock(rule_lock);
6598 struct ice_aqc_sw_rules_elem *s_rule;
/* Build a minimal remove request: index of the rule, zero action and
 * zero header length.
 */
6601 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
6603 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
6606 return ICE_ERR_NO_MEMORY;
6607 s_rule->pdata.lkup_tx_rx.act = 0;
6608 s_rule->pdata.lkup_tx_rx.index =
6609 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
6610 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
6611 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6613 ice_aqc_opc_remove_sw_rules, NULL);
6614 if (status == ICE_SUCCESS) {
6615 ice_acquire_lock(rule_lock);
6616 LIST_DEL(&list_elem->list_entry);
6617 ice_free(hw, list_elem->lkups);
6618 ice_free(hw, list_elem);
6619 ice_release_lock(rule_lock);
6621 ice_free(hw, s_rule);
6627 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6628 * @hw: pointer to the hardware structure
6629 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6631 * This function is used to remove 1 rule at a time. The removal is based on
6632 * the remove_entry parameter. This function will remove rule for a given
6633 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Remove an advanced rule identified by @remove_entry (rid + rule_id +
 * vsi_handle): scan the recipe's filter list for the matching rule ID and
 * delegate to ice_rem_adv_rule() with the stored lookups. Returns
 * ICE_ERR_PARAM when the recipe was never created or no rule matches.
 */
6636 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6637 struct ice_rule_query_data *remove_entry)
6639 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6640 struct LIST_HEAD_TYPE *list_head;
6641 struct ice_adv_rule_info rinfo;
6642 struct ice_switch_info *sw;
6644 sw = hw->switch_info;
6645 if (!sw->recp_list[remove_entry->rid].recp_created)
6646 return ICE_ERR_PARAM;
6647 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6648 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
6650 if (list_itr->rule_info.fltr_rule_id ==
6651 remove_entry->rule_id) {
/* Copy rule info but target the caller-specified VSI handle. */
6652 rinfo = list_itr->rule_info;
6653 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6654 return ice_rem_adv_rule(hw, list_itr->lkups,
6655 list_itr->lkups_cnt, &rinfo);
6658 return ICE_ERR_PARAM;
6662 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6664 * @hw: pointer to the hardware structure
6665 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6667 * This function is used to remove all the rules for a given VSI and as soon
6668 * as removing a rule fails, it will return immediately with the error code,
6669 * else it will return ICE_SUCCESS
/* Remove every advanced rule that @vsi_handle participates in: iterate all
 * created recipes flagged adv_rule, and for each rule whose VSI-list map
 * contains the handle, call ice_rem_adv_rule(). Stops at the first failure.
 * NOTE(review): non-contiguous original numbering -- continue statements,
 * status checks and the final return are missing from this extraction.
 */
6672 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6674 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6675 struct ice_vsi_list_map_info *map_info;
6676 struct LIST_HEAD_TYPE *list_head;
6677 struct ice_adv_rule_info rinfo;
6678 struct ice_switch_info *sw;
6679 enum ice_status status;
6680 u16 vsi_list_id = 0;
6683 sw = hw->switch_info;
6684 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6685 if (!sw->recp_list[rid].recp_created)
6687 if (!sw->recp_list[rid].adv_rule)
6689 list_head = &sw->recp_list[rid].filt_rules;
6691 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6692 ice_adv_fltr_mgmt_list_entry, list_entry) {
6693 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
6697 rinfo = list_itr->rule_info;
6698 rinfo.sw_act.vsi_handle = vsi_handle;
6699 status = ice_rem_adv_rule(hw, list_itr->lkups,
6700 list_itr->lkups_cnt, &rinfo);
6710 * ice_replay_fltr - Replay all the filters stored by a specific list head
6711 * @hw: pointer to the hardware structure
6712 * @list_head: list for which filters needs to be replayed
6713 * @recp_id: Recipe ID for which rules need to be replayed
/* Replay the filters of recipe @recp_id: move the book-keeping list to a
 * temporary head (so re-adding does not hit "already exists"), then re-add
 * each filter -- directly for single-VSI non-VLAN rules, or per-VSI by
 * draining the entry's vsi_map for list-based/VLAN rules. The temporary list
 * is cleared via ice_rem_sw_rule_info() at the end.
 * NOTE(review): non-contiguous original numbering -- loop headers/braces for
 * the per-VSI walk are missing from this extraction.
 */
6715 static enum ice_status
6716 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6718 struct ice_fltr_mgmt_list_entry *itr;
6719 struct LIST_HEAD_TYPE l_head;
6720 enum ice_status status = ICE_SUCCESS;
6722 if (LIST_EMPTY(list_head))
6725 /* Move entries from the given list_head to a temporary l_head so that
6726 * they can be replayed. Otherwise when trying to re-add the same
6727 * filter, the function will return already exists
6729 LIST_REPLACE_INIT(list_head, &l_head);
6731 /* Mark the given list_head empty by reinitializing it so filters
6732 * could be added again by *handler
6734 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6736 struct ice_fltr_list_entry f_entry;
6738 f_entry.fltr_info = itr->fltr_info;
6739 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6740 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6741 if (status != ICE_SUCCESS)
6746 /* Add a filter per VSI separately */
6751 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6753 if (!ice_is_vsi_valid(hw, vsi_handle))
6756 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6757 f_entry.fltr_info.vsi_handle = vsi_handle;
6758 f_entry.fltr_info.fwd_id.hw_vsi_id =
6759 ice_get_hw_vsi_num(hw, vsi_handle);
/* Replayed filters always re-added as plain FWD_TO_VSI; the VSI
 * list is rebuilt implicitly by ice_add_rule_internal().
 */
6760 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6761 if (recp_id == ICE_SW_LKUP_VLAN)
6762 status = ice_add_vlan_internal(hw, &f_entry);
6764 status = ice_add_rule_internal(hw, recp_id,
6766 if (status != ICE_SUCCESS)
6771 /* Clear the filter management list */
6772 ice_rem_sw_rule_info(hw, &l_head);
6777 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6778 * @hw: pointer to the hardware structure
6780 * NOTE: This function does not clean up partially added filters on error.
6781 * It is up to caller of the function to issue a reset or fail early.
6783 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6785 struct ice_switch_info *sw = hw->switch_info;
6786 enum ice_status status = ICE_SUCCESS;
/* Walk every recipe and replay the rules kept on its filter list;
 * the recipe index doubles as the recipe ID passed to the replay helper
 */
6789 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6790 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6792 status = ice_replay_fltr(hw, i, head);
/* Stop at the first failure; see the NOTE above about partial adds */
6793 if (status != ICE_SUCCESS)
6800 * ice_replay_vsi_fltr - Replay filters for requested VSI
6801 * @hw: pointer to the hardware structure
6802 * @vsi_handle: driver VSI handle
6803 * @recp_id: Recipe ID for which rules need to be replayed
6804 * @list_head: list for which filters need to be replayed
6806 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6807 * It is required to pass valid VSI handle.
6809 static enum ice_status
6810 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6811 struct LIST_HEAD_TYPE *list_head)
6813 struct ice_fltr_mgmt_list_entry *itr;
6814 enum ice_status status = ICE_SUCCESS;
/* Nothing to do when there are no stored rules for this recipe */
6817 if (LIST_EMPTY(list_head))
6819 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6821 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6823 struct ice_fltr_list_entry f_entry;
/* Start each replay from a copy of the stored filter info */
6825 f_entry.fltr_info = itr->fltr_info;
/* A non-VLAN rule referenced by fewer than two VSIs and owned by
 * this VSI can be re-added directly
 */
6826 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6827 itr->fltr_info.vsi_handle == vsi_handle) {
6828 /* update the src in case it is VSI num */
6829 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6830 f_entry.fltr_info.src = hw_vsi_id;
6831 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6832 if (status != ICE_SUCCESS)
/* Shared rule: only replay it if this VSI is a member of the
 * rule's VSI list bitmap
 */
6836 if (!itr->vsi_list_info ||
6837 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6839 /* Clearing it so that the logic can add it back */
6840 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6841 f_entry.fltr_info.vsi_handle = vsi_handle;
6842 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6843 /* update the src in case it is VSI num */
6844 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6845 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters need the VLAN-specific add path */
6846 if (recp_id == ICE_SW_LKUP_VLAN)
6847 status = ice_add_vlan_internal(hw, &f_entry);
6849 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6850 if (status != ICE_SUCCESS)
6858 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6859 * @hw: pointer to the hardware structure
6860 * @vsi_handle: driver VSI handle
6861 * @list_head: list for which filters need to be replayed
6863 * Replay the advanced rule for the given VSI.
6865 static enum ice_status
6866 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6867 struct LIST_HEAD_TYPE *list_head)
6869 struct ice_rule_query_data added_entry = { 0 };
6870 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6871 enum ice_status status = ICE_SUCCESS;
/* Nothing to replay when the list is empty */
6873 if (LIST_EMPTY(list_head))
6875 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6877 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6878 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only replay rules whose switch action targets this VSI */
6880 if (vsi_handle != rinfo->sw_act.vsi_handle)
6882 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6891 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6892 * @hw: pointer to the hardware structure
6893 * @vsi_handle: driver VSI handle
6895 * Replays filters for requested VSI via vsi_handle.
6897 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6899 struct ice_switch_info *sw = hw->switch_info;
6900 enum ice_status status;
6903 /* Update the recipes that were created */
6904 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6905 struct LIST_HEAD_TYPE *head;
6907 head = &sw->recp_list[i].filt_replay_rules;
/* Advanced-rule recipes are replayed through the advanced path;
 * all other recipes use the regular per-VSI filter replay
 */
6908 if (!sw->recp_list[i].adv_rule)
6909 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6911 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
/* Abort on the first recipe that fails to replay */
6912 if (status != ICE_SUCCESS)
6920 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6921 * @hw: pointer to the HW struct
6923 * Deletes the filter replay rules.
6925 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6927 struct ice_switch_info *sw = hw->switch_info;
6933 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6934 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6935 struct LIST_HEAD_TYPE *l_head;
6937 l_head = &sw->recp_list[i].filt_replay_rules;
6938 if (!sw->recp_list[i].adv_rule)
6939 ice_rem_sw_rule_info(hw, l_head);
6941 ice_rem_adv_rule_info(hw, l_head);