69fa64ac9c17705efb76807cd8a951decdcf9736
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
9 #define ICE_ETH_DA_OFFSET               0
10 #define ICE_ETH_ETHTYPE_OFFSET          12
11 #define ICE_ETH_VLAN_TCI_OFFSET         14
12 #define ICE_MAX_VLAN_ID                 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID         0x002F
14 #define ICE_PPP_IPV6_PROTO_ID           0x0057
15 #define ICE_IPV6_ETHER_ID               0x86DD
16 #define ICE_TCP_PROTO_ID                0x06
17
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19  * struct to configure any switch filter rules.
20  * {DA (6 bytes), SA(6 bytes),
21  * Ether type (2 bytes for header without VLAN tag) OR
22  * VLAN tag (4 bytes for header with VLAN tag) }
23  *
24  * Word on Hardcoded values
25  * byte 0 = 0x2: to identify it as locally administered DA MAC
26  * byte 6 = 0x2: to identify it as locally administered SA MAC
27  * byte 12 = 0x81 & byte 13 = 0x00:
28  *      In case of VLAN filter first two bytes defines ether type (0x8100)
29  *      and remaining two bytes are placeholder for programming a given VLAN ID
30  *      In case of Ether type filter it is treated as header without VLAN tag
31  *      and byte 12 and 13 is used to program a given Ether type instead
32  */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34                                                         0x2, 0, 0, 0, 0, 0,
35                                                         0x81, 0, 0, 0};
36
37 struct ice_dummy_pkt_offsets {
38         enum ice_protocol_type type;
39         u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
40 };
41
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
43         { ICE_MAC_OFOS,         0 },
44         { ICE_ETYPE_OL,         12 },
45         { ICE_IPV4_OFOS,        14 },
46         { ICE_NVGRE,            34 },
47         { ICE_MAC_IL,           42 },
48         { ICE_IPV4_IL,          56 },
49         { ICE_TCP_IL,           76 },
50         { ICE_PROTOCOL_LAST,    0 },
51 };
52
53 static const u8 dummy_gre_tcp_packet[] = {
54         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55         0x00, 0x00, 0x00, 0x00,
56         0x00, 0x00, 0x00, 0x00,
57
58         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
59
60         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61         0x00, 0x00, 0x00, 0x00,
62         0x00, 0x2F, 0x00, 0x00,
63         0x00, 0x00, 0x00, 0x00,
64         0x00, 0x00, 0x00, 0x00,
65
66         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67         0x00, 0x00, 0x00, 0x00,
68
69         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70         0x00, 0x00, 0x00, 0x00,
71         0x00, 0x00, 0x00, 0x00,
72         0x08, 0x00,
73
74         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75         0x00, 0x00, 0x00, 0x00,
76         0x00, 0x06, 0x00, 0x00,
77         0x00, 0x00, 0x00, 0x00,
78         0x00, 0x00, 0x00, 0x00,
79
80         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81         0x00, 0x00, 0x00, 0x00,
82         0x00, 0x00, 0x00, 0x00,
83         0x50, 0x02, 0x20, 0x00,
84         0x00, 0x00, 0x00, 0x00
85 };
86
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
88         { ICE_MAC_OFOS,         0 },
89         { ICE_ETYPE_OL,         12 },
90         { ICE_IPV4_OFOS,        14 },
91         { ICE_NVGRE,            34 },
92         { ICE_MAC_IL,           42 },
93         { ICE_IPV4_IL,          56 },
94         { ICE_UDP_ILOS,         76 },
95         { ICE_PROTOCOL_LAST,    0 },
96 };
97
98 static const u8 dummy_gre_udp_packet[] = {
99         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100         0x00, 0x00, 0x00, 0x00,
101         0x00, 0x00, 0x00, 0x00,
102
103         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
104
105         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106         0x00, 0x00, 0x00, 0x00,
107         0x00, 0x2F, 0x00, 0x00,
108         0x00, 0x00, 0x00, 0x00,
109         0x00, 0x00, 0x00, 0x00,
110
111         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112         0x00, 0x00, 0x00, 0x00,
113
114         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115         0x00, 0x00, 0x00, 0x00,
116         0x00, 0x00, 0x00, 0x00,
117         0x08, 0x00,
118
119         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120         0x00, 0x00, 0x00, 0x00,
121         0x00, 0x11, 0x00, 0x00,
122         0x00, 0x00, 0x00, 0x00,
123         0x00, 0x00, 0x00, 0x00,
124
125         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126         0x00, 0x08, 0x00, 0x00,
127 };
128
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130         { ICE_MAC_OFOS,         0 },
131         { ICE_ETYPE_OL,         12 },
132         { ICE_IPV4_OFOS,        14 },
133         { ICE_UDP_OF,           34 },
134         { ICE_VXLAN,            42 },
135         { ICE_GENEVE,           42 },
136         { ICE_VXLAN_GPE,        42 },
137         { ICE_MAC_IL,           50 },
138         { ICE_IPV4_IL,          64 },
139         { ICE_TCP_IL,           84 },
140         { ICE_PROTOCOL_LAST,    0 },
141 };
142
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
145         0x00, 0x00, 0x00, 0x00,
146         0x00, 0x00, 0x00, 0x00,
147
148         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
149
150         0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151         0x00, 0x01, 0x00, 0x00,
152         0x40, 0x11, 0x00, 0x00,
153         0x00, 0x00, 0x00, 0x00,
154         0x00, 0x00, 0x00, 0x00,
155
156         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157         0x00, 0x46, 0x00, 0x00,
158
159         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160         0x00, 0x00, 0x00, 0x00,
161
162         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163         0x00, 0x00, 0x00, 0x00,
164         0x00, 0x00, 0x00, 0x00,
165         0x08, 0x00,
166
167         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168         0x00, 0x01, 0x00, 0x00,
169         0x40, 0x06, 0x00, 0x00,
170         0x00, 0x00, 0x00, 0x00,
171         0x00, 0x00, 0x00, 0x00,
172
173         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174         0x00, 0x00, 0x00, 0x00,
175         0x00, 0x00, 0x00, 0x00,
176         0x50, 0x02, 0x20, 0x00,
177         0x00, 0x00, 0x00, 0x00
178 };
179
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181         { ICE_MAC_OFOS,         0 },
182         { ICE_ETYPE_OL,         12 },
183         { ICE_IPV4_OFOS,        14 },
184         { ICE_UDP_OF,           34 },
185         { ICE_VXLAN,            42 },
186         { ICE_GENEVE,           42 },
187         { ICE_VXLAN_GPE,        42 },
188         { ICE_MAC_IL,           50 },
189         { ICE_IPV4_IL,          64 },
190         { ICE_UDP_ILOS,         84 },
191         { ICE_PROTOCOL_LAST,    0 },
192 };
193
194 static const u8 dummy_udp_tun_udp_packet[] = {
195         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
196         0x00, 0x00, 0x00, 0x00,
197         0x00, 0x00, 0x00, 0x00,
198
199         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
200
201         0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202         0x00, 0x01, 0x00, 0x00,
203         0x00, 0x11, 0x00, 0x00,
204         0x00, 0x00, 0x00, 0x00,
205         0x00, 0x00, 0x00, 0x00,
206
207         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208         0x00, 0x3a, 0x00, 0x00,
209
210         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211         0x00, 0x00, 0x00, 0x00,
212
213         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214         0x00, 0x00, 0x00, 0x00,
215         0x00, 0x00, 0x00, 0x00,
216         0x08, 0x00,
217
218         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219         0x00, 0x01, 0x00, 0x00,
220         0x00, 0x11, 0x00, 0x00,
221         0x00, 0x00, 0x00, 0x00,
222         0x00, 0x00, 0x00, 0x00,
223
224         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225         0x00, 0x08, 0x00, 0x00,
226 };
227
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230         { ICE_MAC_OFOS,         0 },
231         { ICE_ETYPE_OL,         12 },
232         { ICE_IPV4_OFOS,        14 },
233         { ICE_UDP_ILOS,         34 },
234         { ICE_PROTOCOL_LAST,    0 },
235 };
236
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240         0x00, 0x00, 0x00, 0x00,
241         0x00, 0x00, 0x00, 0x00,
242
243         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
244
245         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246         0x00, 0x01, 0x00, 0x00,
247         0x00, 0x11, 0x00, 0x00,
248         0x00, 0x00, 0x00, 0x00,
249         0x00, 0x00, 0x00, 0x00,
250
251         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252         0x00, 0x08, 0x00, 0x00,
253
254         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
255 };
256
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259         { ICE_MAC_OFOS,         0 },
260         { ICE_ETYPE_OL,         12 },
261         { ICE_VLAN_OFOS,        14 },
262         { ICE_IPV4_OFOS,        18 },
263         { ICE_UDP_ILOS,         38 },
264         { ICE_PROTOCOL_LAST,    0 },
265 };
266
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270         0x00, 0x00, 0x00, 0x00,
271         0x00, 0x00, 0x00, 0x00,
272
273         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
274
275         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276
277         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278         0x00, 0x01, 0x00, 0x00,
279         0x00, 0x11, 0x00, 0x00,
280         0x00, 0x00, 0x00, 0x00,
281         0x00, 0x00, 0x00, 0x00,
282
283         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284         0x00, 0x08, 0x00, 0x00,
285
286         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
287 };
288
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291         { ICE_MAC_OFOS,         0 },
292         { ICE_ETYPE_OL,         12 },
293         { ICE_IPV4_OFOS,        14 },
294         { ICE_TCP_IL,           34 },
295         { ICE_PROTOCOL_LAST,    0 },
296 };
297
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301         0x00, 0x00, 0x00, 0x00,
302         0x00, 0x00, 0x00, 0x00,
303
304         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
305
306         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307         0x00, 0x01, 0x00, 0x00,
308         0x00, 0x06, 0x00, 0x00,
309         0x00, 0x00, 0x00, 0x00,
310         0x00, 0x00, 0x00, 0x00,
311
312         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313         0x00, 0x00, 0x00, 0x00,
314         0x00, 0x00, 0x00, 0x00,
315         0x50, 0x00, 0x00, 0x00,
316         0x00, 0x00, 0x00, 0x00,
317
318         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
319 };
320
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323         { ICE_MAC_OFOS,         0 },
324         { ICE_ETYPE_OL,         12 },
325         { ICE_VLAN_OFOS,        14 },
326         { ICE_IPV4_OFOS,        18 },
327         { ICE_TCP_IL,           38 },
328         { ICE_PROTOCOL_LAST,    0 },
329 };
330
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334         0x00, 0x00, 0x00, 0x00,
335         0x00, 0x00, 0x00, 0x00,
336
337         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
338
339         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340
341         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342         0x00, 0x01, 0x00, 0x00,
343         0x00, 0x06, 0x00, 0x00,
344         0x00, 0x00, 0x00, 0x00,
345         0x00, 0x00, 0x00, 0x00,
346
347         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348         0x00, 0x00, 0x00, 0x00,
349         0x00, 0x00, 0x00, 0x00,
350         0x50, 0x00, 0x00, 0x00,
351         0x00, 0x00, 0x00, 0x00,
352
353         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
354 };
355
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357         { ICE_MAC_OFOS,         0 },
358         { ICE_ETYPE_OL,         12 },
359         { ICE_IPV6_OFOS,        14 },
360         { ICE_TCP_IL,           54 },
361         { ICE_PROTOCOL_LAST,    0 },
362 };
363
364 static const u8 dummy_tcp_ipv6_packet[] = {
365         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366         0x00, 0x00, 0x00, 0x00,
367         0x00, 0x00, 0x00, 0x00,
368
369         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
370
371         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373         0x00, 0x00, 0x00, 0x00,
374         0x00, 0x00, 0x00, 0x00,
375         0x00, 0x00, 0x00, 0x00,
376         0x00, 0x00, 0x00, 0x00,
377         0x00, 0x00, 0x00, 0x00,
378         0x00, 0x00, 0x00, 0x00,
379         0x00, 0x00, 0x00, 0x00,
380         0x00, 0x00, 0x00, 0x00,
381
382         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383         0x00, 0x00, 0x00, 0x00,
384         0x00, 0x00, 0x00, 0x00,
385         0x50, 0x00, 0x00, 0x00,
386         0x00, 0x00, 0x00, 0x00,
387
388         0x00, 0x00, /* 2 bytes for 4 byte alignment */
389 };
390
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394         { ICE_MAC_OFOS,         0 },
395         { ICE_ETYPE_OL,         12 },
396         { ICE_VLAN_OFOS,        14 },
397         { ICE_IPV6_OFOS,        18 },
398         { ICE_TCP_IL,           58 },
399         { ICE_PROTOCOL_LAST,    0 },
400 };
401
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405         0x00, 0x00, 0x00, 0x00,
406         0x00, 0x00, 0x00, 0x00,
407
408         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
409
410         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411
412         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414         0x00, 0x00, 0x00, 0x00,
415         0x00, 0x00, 0x00, 0x00,
416         0x00, 0x00, 0x00, 0x00,
417         0x00, 0x00, 0x00, 0x00,
418         0x00, 0x00, 0x00, 0x00,
419         0x00, 0x00, 0x00, 0x00,
420         0x00, 0x00, 0x00, 0x00,
421         0x00, 0x00, 0x00, 0x00,
422
423         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424         0x00, 0x00, 0x00, 0x00,
425         0x00, 0x00, 0x00, 0x00,
426         0x50, 0x00, 0x00, 0x00,
427         0x00, 0x00, 0x00, 0x00,
428
429         0x00, 0x00, /* 2 bytes for 4 byte alignment */
430 };
431
432 /* IPv6 + UDP */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434         { ICE_MAC_OFOS,         0 },
435         { ICE_ETYPE_OL,         12 },
436         { ICE_IPV6_OFOS,        14 },
437         { ICE_UDP_ILOS,         54 },
438         { ICE_PROTOCOL_LAST,    0 },
439 };
440
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444         0x00, 0x00, 0x00, 0x00,
445         0x00, 0x00, 0x00, 0x00,
446
447         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
448
449         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450         0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451         0x00, 0x00, 0x00, 0x00,
452         0x00, 0x00, 0x00, 0x00,
453         0x00, 0x00, 0x00, 0x00,
454         0x00, 0x00, 0x00, 0x00,
455         0x00, 0x00, 0x00, 0x00,
456         0x00, 0x00, 0x00, 0x00,
457         0x00, 0x00, 0x00, 0x00,
458         0x00, 0x00, 0x00, 0x00,
459
460         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461         0x00, 0x10, 0x00, 0x00,
462
463         0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464         0x00, 0x00, 0x00, 0x00,
465
466         0x00, 0x00, /* 2 bytes for 4 byte alignment */
467 };
468
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
472         { ICE_MAC_OFOS,         0 },
473         { ICE_ETYPE_OL,         12 },
474         { ICE_VLAN_OFOS,        14 },
475         { ICE_IPV6_OFOS,        18 },
476         { ICE_UDP_ILOS,         58 },
477         { ICE_PROTOCOL_LAST,    0 },
478 };
479
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483         0x00, 0x00, 0x00, 0x00,
484         0x00, 0x00, 0x00, 0x00,
485
486         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
487
488         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489
490         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491         0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492         0x00, 0x00, 0x00, 0x00,
493         0x00, 0x00, 0x00, 0x00,
494         0x00, 0x00, 0x00, 0x00,
495         0x00, 0x00, 0x00, 0x00,
496         0x00, 0x00, 0x00, 0x00,
497         0x00, 0x00, 0x00, 0x00,
498         0x00, 0x00, 0x00, 0x00,
499         0x00, 0x00, 0x00, 0x00,
500
501         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502         0x00, 0x08, 0x00, 0x00,
503
504         0x00, 0x00, /* 2 bytes for 4 byte alignment */
505 };
506
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508         { ICE_MAC_OFOS,         0 },
509         { ICE_IPV4_OFOS,        14 },
510         { ICE_UDP_OF,           34 },
511         { ICE_GTP,              42 },
512         { ICE_PROTOCOL_LAST,    0 },
513 };
514
515 static const u8 dummy_udp_gtp_packet[] = {
516         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517         0x00, 0x00, 0x00, 0x00,
518         0x00, 0x00, 0x00, 0x00,
519         0x08, 0x00,
520
521         0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522         0x00, 0x00, 0x00, 0x00,
523         0x00, 0x11, 0x00, 0x00,
524         0x00, 0x00, 0x00, 0x00,
525         0x00, 0x00, 0x00, 0x00,
526
527         0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528         0x00, 0x1c, 0x00, 0x00,
529
530         0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531         0x00, 0x00, 0x00, 0x00,
532         0x00, 0x00, 0x00, 0x85,
533
534         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535         0x00, 0x00, 0x00, 0x00,
536 };
537
538 static const
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
540         { ICE_MAC_OFOS,         0 },
541         { ICE_IPV4_OFOS,        14 },
542         { ICE_UDP_OF,           34 },
543         { ICE_GTP,              42 },
544         { ICE_IPV4_IL,          62 },
545         { ICE_PROTOCOL_LAST,    0 },
546 };
547
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550         0x00, 0x00, 0x00, 0x00,
551         0x00, 0x00, 0x00, 0x00,
552         0x08, 0x00,
553
554         0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555         0x00, 0x00, 0x40, 0x00,
556         0x40, 0x11, 0x00, 0x00,
557         0x00, 0x00, 0x00, 0x00,
558         0x00, 0x00, 0x00, 0x00,
559
560         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561         0x00, 0x00, 0x00, 0x00,
562
563         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
564         0x00, 0x00, 0x00, 0x00,
565         0x00, 0x00, 0x00, 0x85,
566
567         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568         0x00, 0x00, 0x00, 0x00,
569
570         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571         0x00, 0x00, 0x40, 0x00,
572         0x40, 0x00, 0x00, 0x00,
573         0x00, 0x00, 0x00, 0x00,
574         0x00, 0x00, 0x00, 0x00,
575         0x00, 0x00,
576 };
577
578 static const
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
580         { ICE_MAC_OFOS,         0 },
581         { ICE_IPV4_OFOS,        14 },
582         { ICE_UDP_OF,           34 },
583         { ICE_GTP,              42 },
584         { ICE_IPV6_IL,          62 },
585         { ICE_PROTOCOL_LAST,    0 },
586 };
587
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590         0x00, 0x00, 0x00, 0x00,
591         0x00, 0x00, 0x00, 0x00,
592         0x08, 0x00,
593
594         0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595         0x00, 0x00, 0x40, 0x00,
596         0x40, 0x11, 0x00, 0x00,
597         0x00, 0x00, 0x00, 0x00,
598         0x00, 0x00, 0x00, 0x00,
599
600         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601         0x00, 0x00, 0x00, 0x00,
602
603         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
604         0x00, 0x00, 0x00, 0x00,
605         0x00, 0x00, 0x00, 0x85,
606
607         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608         0x00, 0x00, 0x00, 0x00,
609
610         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611         0x00, 0x00, 0x3b, 0x00,
612         0x00, 0x00, 0x00, 0x00,
613         0x00, 0x00, 0x00, 0x00,
614         0x00, 0x00, 0x00, 0x00,
615         0x00, 0x00, 0x00, 0x00,
616         0x00, 0x00, 0x00, 0x00,
617         0x00, 0x00, 0x00, 0x00,
618         0x00, 0x00, 0x00, 0x00,
619         0x00, 0x00, 0x00, 0x00,
620
621         0x00, 0x00,
622 };
623
624 static const
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
626         { ICE_MAC_OFOS,         0 },
627         { ICE_IPV6_OFOS,        14 },
628         { ICE_UDP_OF,           54 },
629         { ICE_GTP,              62 },
630         { ICE_IPV4_IL,          82 },
631         { ICE_PROTOCOL_LAST,    0 },
632 };
633
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636         0x00, 0x00, 0x00, 0x00,
637         0x00, 0x00, 0x00, 0x00,
638         0x86, 0xdd,
639
640         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641         0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642         0x00, 0x00, 0x00, 0x00,
643         0x00, 0x00, 0x00, 0x00,
644         0x00, 0x00, 0x00, 0x00,
645         0x00, 0x00, 0x00, 0x00,
646         0x00, 0x00, 0x00, 0x00,
647         0x00, 0x00, 0x00, 0x00,
648         0x00, 0x00, 0x00, 0x00,
649         0x00, 0x00, 0x00, 0x00,
650
651         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652         0x00, 0x00, 0x00, 0x00,
653
654         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
655         0x00, 0x00, 0x00, 0x00,
656         0x00, 0x00, 0x00, 0x85,
657
658         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659         0x00, 0x00, 0x00, 0x00,
660
661         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662         0x00, 0x00, 0x40, 0x00,
663         0x40, 0x00, 0x00, 0x00,
664         0x00, 0x00, 0x00, 0x00,
665         0x00, 0x00, 0x00, 0x00,
666
667         0x00, 0x00,
668 };
669
670 static const
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
672         { ICE_MAC_OFOS,         0 },
673         { ICE_IPV6_OFOS,        14 },
674         { ICE_UDP_OF,           54 },
675         { ICE_GTP,              62 },
676         { ICE_IPV6_IL,          82 },
677         { ICE_PROTOCOL_LAST,    0 },
678 };
679
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682         0x00, 0x00, 0x00, 0x00,
683         0x00, 0x00, 0x00, 0x00,
684         0x86, 0xdd,
685
686         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687         0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688         0x00, 0x00, 0x00, 0x00,
689         0x00, 0x00, 0x00, 0x00,
690         0x00, 0x00, 0x00, 0x00,
691         0x00, 0x00, 0x00, 0x00,
692         0x00, 0x00, 0x00, 0x00,
693         0x00, 0x00, 0x00, 0x00,
694         0x00, 0x00, 0x00, 0x00,
695         0x00, 0x00, 0x00, 0x00,
696
697         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698         0x00, 0x00, 0x00, 0x00,
699
700         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
701         0x00, 0x00, 0x00, 0x00,
702         0x00, 0x00, 0x00, 0x85,
703
704         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705         0x00, 0x00, 0x00, 0x00,
706
707         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708         0x00, 0x00, 0x3b, 0x00,
709         0x00, 0x00, 0x00, 0x00,
710         0x00, 0x00, 0x00, 0x00,
711         0x00, 0x00, 0x00, 0x00,
712         0x00, 0x00, 0x00, 0x00,
713         0x00, 0x00, 0x00, 0x00,
714         0x00, 0x00, 0x00, 0x00,
715         0x00, 0x00, 0x00, 0x00,
716         0x00, 0x00, 0x00, 0x00,
717
718         0x00, 0x00,
719 };
720
721 static const
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
723         { ICE_MAC_OFOS,         0 },
724         { ICE_IPV4_OFOS,        14 },
725         { ICE_UDP_OF,           34 },
726         { ICE_GTP_NO_PAY,       42 },
727         { ICE_PROTOCOL_LAST,    0 },
728 };
729
730 static const
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
732         { ICE_MAC_OFOS,         0 },
733         { ICE_IPV6_OFOS,        14 },
734         { ICE_UDP_OF,           54 },
735         { ICE_GTP_NO_PAY,       62 },
736         { ICE_PROTOCOL_LAST,    0 },
737 };
738
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
740         { ICE_MAC_OFOS,         0 },
741         { ICE_ETYPE_OL,         12 },
742         { ICE_VLAN_OFOS,        14},
743         { ICE_PPPOE,            18 },
744         { ICE_PROTOCOL_LAST,    0 },
745 };
746
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
748         { ICE_MAC_OFOS,         0 },
749         { ICE_ETYPE_OL,         12 },
750         { ICE_VLAN_OFOS,        14},
751         { ICE_PPPOE,            18 },
752         { ICE_IPV4_OFOS,        26 },
753         { ICE_PROTOCOL_LAST,    0 },
754 };
755
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758         0x00, 0x00, 0x00, 0x00,
759         0x00, 0x00, 0x00, 0x00,
760
761         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
762
763         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
764
765         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
766         0x00, 0x16,
767
768         0x00, 0x21,             /* PPP Link Layer 24 */
769
770         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771         0x00, 0x00, 0x00, 0x00,
772         0x00, 0x00, 0x00, 0x00,
773         0x00, 0x00, 0x00, 0x00,
774         0x00, 0x00, 0x00, 0x00,
775
776         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
777 };
778
779 static const
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
781         { ICE_MAC_OFOS,         0 },
782         { ICE_ETYPE_OL,         12 },
783         { ICE_VLAN_OFOS,        14},
784         { ICE_PPPOE,            18 },
785         { ICE_IPV4_OFOS,        26 },
786         { ICE_TCP_IL,           46 },
787         { ICE_PROTOCOL_LAST,    0 },
788 };
789
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792         0x00, 0x00, 0x00, 0x00,
793         0x00, 0x00, 0x00, 0x00,
794
795         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
796
797         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
798
799         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
800         0x00, 0x16,
801
802         0x00, 0x21,             /* PPP Link Layer 24 */
803
804         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805         0x00, 0x01, 0x00, 0x00,
806         0x00, 0x06, 0x00, 0x00,
807         0x00, 0x00, 0x00, 0x00,
808         0x00, 0x00, 0x00, 0x00,
809
810         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811         0x00, 0x00, 0x00, 0x00,
812         0x00, 0x00, 0x00, 0x00,
813         0x50, 0x00, 0x00, 0x00,
814         0x00, 0x00, 0x00, 0x00,
815
816         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
817 };
818
819 static const
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
821         { ICE_MAC_OFOS,         0 },
822         { ICE_ETYPE_OL,         12 },
823         { ICE_VLAN_OFOS,        14},
824         { ICE_PPPOE,            18 },
825         { ICE_IPV4_OFOS,        26 },
826         { ICE_UDP_ILOS,         46 },
827         { ICE_PROTOCOL_LAST,    0 },
828 };
829
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832         0x00, 0x00, 0x00, 0x00,
833         0x00, 0x00, 0x00, 0x00,
834
835         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
836
837         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
838
839         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
840         0x00, 0x16,
841
842         0x00, 0x21,             /* PPP Link Layer 24 */
843
844         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845         0x00, 0x01, 0x00, 0x00,
846         0x00, 0x11, 0x00, 0x00,
847         0x00, 0x00, 0x00, 0x00,
848         0x00, 0x00, 0x00, 0x00,
849
850         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851         0x00, 0x08, 0x00, 0x00,
852
853         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
854 };
855
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
857         { ICE_MAC_OFOS,         0 },
858         { ICE_ETYPE_OL,         12 },
859         { ICE_VLAN_OFOS,        14},
860         { ICE_PPPOE,            18 },
861         { ICE_IPV6_OFOS,        26 },
862         { ICE_PROTOCOL_LAST,    0 },
863 };
864
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867         0x00, 0x00, 0x00, 0x00,
868         0x00, 0x00, 0x00, 0x00,
869
870         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
871
872         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
873
874         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
875         0x00, 0x2a,
876
877         0x00, 0x57,             /* PPP Link Layer 24 */
878
879         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880         0x00, 0x00, 0x3b, 0x00,
881         0x00, 0x00, 0x00, 0x00,
882         0x00, 0x00, 0x00, 0x00,
883         0x00, 0x00, 0x00, 0x00,
884         0x00, 0x00, 0x00, 0x00,
885         0x00, 0x00, 0x00, 0x00,
886         0x00, 0x00, 0x00, 0x00,
887         0x00, 0x00, 0x00, 0x00,
888         0x00, 0x00, 0x00, 0x00,
889
890         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
891 };
892
/* Header offsets within dummy_pppoe_ipv6_tcp_packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14},
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_TCP_IL,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy PPPoE session packet with IPv6 + TCP payload */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,		/* PPPoE payload length = 42 */

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset = 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
937
/* Header offsets within dummy_pppoe_ipv6_udp_packet below */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14},
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy PPPoE session packet with IPv6 + UDP payload */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,		/* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,		/* PPPoE payload length = 42 */

	0x00, 0x57,		/* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00, /* UDP length = 8 (header only) */

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
979
/* Header offsets within dummy_ipv4_esp_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 packet with an ESP header (IP protocol 0x32 = 50) */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1003
/* Header offsets within dummy_ipv6_esp_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 packet with an ESP header (next header 0x32 = 50) */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1032
/* Header offsets within dummy_ipv4_ah_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 packet with an AH header (IP protocol 0x33 = 51) */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1057
/* Header offsets within dummy_ipv6_ah_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 packet with an AH header (next header 0x33 = 51) */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1087
/* Header offsets within dummy_ipv4_nat_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 NAT-T packet: UDP to port 4500 encapsulating ESP */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 34, dst port 0x1194 = 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1115
/* Header offsets within dummy_ipv6_nat_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 NAT-T packet: UDP to port 4500 encapsulating ESP */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP (NAT-T encap) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_UDP_ILOS 54, dst port 0x1194 = 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_NAT_T 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */

};
1149
/* Header offsets within dummy_ipv4_l2tpv3_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 packet with an L2TPv3 header (IP protocol 0x73 = 115) */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1174
/* Header offsets within dummy_ipv6_l2tpv3_pkt below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 packet with an L2TPv3 header (next header 0x73 = 115) */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x73, 0x40,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1204
/* Recipe to profile association bitmap: indexed by recipe ID, each entry
 * holds the set of profile IDs that use that recipe.
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* Profile to recipe association bitmap: indexed by profile ID, each entry
 * holds the set of recipe IDs associated with that profile.
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* Forward declaration; refreshes both association bitmaps from FW */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1214
1215 /**
1216  * ice_collect_result_idx - copy result index values
1217  * @buf: buffer that contains the result index
1218  * @recp: the recipe struct to copy data into
1219  */
1220 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1221                                    struct ice_sw_recipe *recp)
1222 {
1223         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1224                 ice_set_bit(buf->content.result_indx &
1225                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1226 }
1227
1228 /**
1229  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1230  * @rid: recipe ID that we are populating
1231  */
1232 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1233 {
1234         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1235         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1236         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1237         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1238         enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1239         u16 i, j, profile_num = 0;
1240         bool non_tun_valid = false;
1241         bool pppoe_valid = false;
1242         bool vxlan_valid = false;
1243         bool gre_valid = false;
1244         bool gtp_valid = false;
1245         bool flag_valid = false;
1246
1247         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1248                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1249                         continue;
1250                 else
1251                         profile_num++;
1252
1253                 for (i = 0; i < 12; i++) {
1254                         if (gre_profile[i] == j)
1255                                 gre_valid = true;
1256                 }
1257
1258                 for (i = 0; i < 12; i++) {
1259                         if (vxlan_profile[i] == j)
1260                                 vxlan_valid = true;
1261                 }
1262
1263                 for (i = 0; i < 7; i++) {
1264                         if (pppoe_profile[i] == j)
1265                                 pppoe_valid = true;
1266                 }
1267
1268                 for (i = 0; i < 6; i++) {
1269                         if (non_tun_profile[i] == j)
1270                                 non_tun_valid = true;
1271                 }
1272
1273                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1274                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1275                         gtp_valid = true;
1276
1277                 if ((j >= ICE_PROFID_IPV4_ESP &&
1278                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1279                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1280                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1281                         flag_valid = true;
1282         }
1283
1284         if (!non_tun_valid && vxlan_valid)
1285                 tun_type = ICE_SW_TUN_VXLAN;
1286         else if (!non_tun_valid && gre_valid)
1287                 tun_type = ICE_SW_TUN_NVGRE;
1288         else if (!non_tun_valid && pppoe_valid)
1289                 tun_type = ICE_SW_TUN_PPPOE;
1290         else if (!non_tun_valid && gtp_valid)
1291                 tun_type = ICE_SW_TUN_GTP;
1292         else if (non_tun_valid &&
1293                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1294                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1295         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1296                  !pppoe_valid)
1297                 tun_type = ICE_NON_TUN;
1298         else
1299                 tun_type = ICE_NON_TUN;
1300
1301         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1302                 i = ice_is_bit_set(recipe_to_profile[rid],
1303                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1304                 j = ice_is_bit_set(recipe_to_profile[rid],
1305                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1306                 if (i && !j)
1307                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1308                 else if (!i && j)
1309                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1310         }
1311
1312         if (tun_type == ICE_SW_TUN_GTP) {
1313                 if (ice_is_bit_set(recipe_to_profile[rid],
1314                                    ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1315                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1316                 else if (ice_is_bit_set(recipe_to_profile[rid],
1317                                         ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1318                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1319                 else if (ice_is_bit_set(recipe_to_profile[rid],
1320                                         ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1321                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1322                 else if (ice_is_bit_set(recipe_to_profile[rid],
1323                                         ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1324                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1325         }
1326
1327         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1328                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1329                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1330                                 switch (j) {
1331                                 case ICE_PROFID_IPV4_TCP:
1332                                         tun_type = ICE_SW_IPV4_TCP;
1333                                         break;
1334                                 case ICE_PROFID_IPV4_UDP:
1335                                         tun_type = ICE_SW_IPV4_UDP;
1336                                         break;
1337                                 case ICE_PROFID_IPV6_TCP:
1338                                         tun_type = ICE_SW_IPV6_TCP;
1339                                         break;
1340                                 case ICE_PROFID_IPV6_UDP:
1341                                         tun_type = ICE_SW_IPV6_UDP;
1342                                         break;
1343                                 case ICE_PROFID_PPPOE_PAY:
1344                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1345                                         break;
1346                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1347                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1348                                         break;
1349                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1350                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1351                                         break;
1352                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1353                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1354                                         break;
1355                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1356                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1357                                         break;
1358                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1359                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1360                                         break;
1361                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1362                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1363                                         break;
1364                                 case ICE_PROFID_IPV4_ESP:
1365                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1366                                         break;
1367                                 case ICE_PROFID_IPV6_ESP:
1368                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1369                                         break;
1370                                 case ICE_PROFID_IPV4_AH:
1371                                         tun_type = ICE_SW_TUN_IPV4_AH;
1372                                         break;
1373                                 case ICE_PROFID_IPV6_AH:
1374                                         tun_type = ICE_SW_TUN_IPV6_AH;
1375                                         break;
1376                                 case ICE_PROFID_IPV4_NAT_T:
1377                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
1378                                         break;
1379                                 case ICE_PROFID_IPV6_NAT_T:
1380                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
1381                                         break;
1382                                 case ICE_PROFID_IPV4_PFCP_NODE:
1383                                         tun_type =
1384                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1385                                         break;
1386                                 case ICE_PROFID_IPV6_PFCP_NODE:
1387                                         tun_type =
1388                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1389                                         break;
1390                                 case ICE_PROFID_IPV4_PFCP_SESSION:
1391                                         tun_type =
1392                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1393                                         break;
1394                                 case ICE_PROFID_IPV6_PFCP_SESSION:
1395                                         tun_type =
1396                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1397                                         break;
1398                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
1399                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1400                                         break;
1401                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
1402                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1403                                         break;
1404                                 case ICE_PROFID_IPV4_GTPU_TEID:
1405                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1406                                         break;
1407                                 case ICE_PROFID_IPV6_GTPU_TEID:
1408                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1409                                         break;
1410                                 default:
1411                                         break;
1412                                 }
1413
1414                                 return tun_type;
1415                         }
1416                 }
1417         }
1418
1419         return tun_type;
1420 }
1421
1422 /**
1423  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1424  * @hw: pointer to hardware structure
1425  * @recps: struct that we need to populate
1426  * @rid: recipe ID that we are populating
1427  * @refresh_required: true if we should get recipe to profile mapping from FW
1428  *
1429  * This function is used to populate all the necessary entries into our
1430  * bookkeeping so that we have a current list of all the recipes that are
1431  * programmed in the firmware.
1432  */
1433 static enum ice_status
1434 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1435                     bool *refresh_required)
1436 {
1437         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1438         struct ice_aqc_recipe_data_elem *tmp;
1439         u16 num_recps = ICE_MAX_NUM_RECIPES;
1440         struct ice_prot_lkup_ext *lkup_exts;
1441         enum ice_status status;
1442         u8 fv_word_idx = 0;
1443         u16 sub_recps;
1444
1445         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1446
1447         /* we need a buffer big enough to accommodate all the recipes */
1448         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1449                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1450         if (!tmp)
1451                 return ICE_ERR_NO_MEMORY;
1452
1453         tmp[0].recipe_indx = rid;
1454         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1455         /* non-zero status meaning recipe doesn't exist */
1456         if (status)
1457                 goto err_unroll;
1458
1459         /* Get recipe to profile map so that we can get the fv from lkups that
1460          * we read for a recipe from FW. Since we want to minimize the number of
1461          * times we make this FW call, just make one call and cache the copy
1462          * until a new recipe is added. This operation is only required the
1463          * first time to get the changes from FW. Then to search existing
1464          * entries we don't need to update the cache again until another recipe
1465          * gets added.
1466          */
1467         if (*refresh_required) {
1468                 ice_get_recp_to_prof_map(hw);
1469                 *refresh_required = false;
1470         }
1471
1472         /* Start populating all the entries for recps[rid] based on lkups from
1473          * firmware. Note that we are only creating the root recipe in our
1474          * database.
1475          */
1476         lkup_exts = &recps[rid].lkup_exts;
1477
1478         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1479                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1480                 struct ice_recp_grp_entry *rg_entry;
1481                 u8 i, prof, idx, prot = 0;
1482                 bool is_root;
1483                 u16 off = 0;
1484
1485                 rg_entry = (struct ice_recp_grp_entry *)
1486                         ice_malloc(hw, sizeof(*rg_entry));
1487                 if (!rg_entry) {
1488                         status = ICE_ERR_NO_MEMORY;
1489                         goto err_unroll;
1490                 }
1491
1492                 idx = root_bufs.recipe_indx;
1493                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1494
1495                 /* Mark all result indices in this chain */
1496                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1497                         ice_set_bit(root_bufs.content.result_indx &
1498                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1499
1500                 /* get the first profile that is associated with rid */
1501                 prof = ice_find_first_bit(recipe_to_profile[idx],
1502                                           ICE_MAX_NUM_PROFILES);
1503                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1504                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1505
1506                         rg_entry->fv_idx[i] = lkup_indx;
1507                         rg_entry->fv_mask[i] =
1508                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1509
1510                         /* If the recipe is a chained recipe then all its
1511                          * child recipe's result will have a result index.
1512                          * To fill fv_words we should not use those result
1513                          * index, we only need the protocol ids and offsets.
1514                          * We will skip all the fv_idx which stores result
1515                          * index in them. We also need to skip any fv_idx which
1516                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1517                          * valid offset value.
1518                          */
1519                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1520                                            rg_entry->fv_idx[i]) ||
1521                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1522                             rg_entry->fv_idx[i] == 0)
1523                                 continue;
1524
1525                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
1526                                           rg_entry->fv_idx[i], &prot, &off);
1527                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1528                         lkup_exts->fv_words[fv_word_idx].off = off;
1529                         lkup_exts->field_mask[fv_word_idx] =
1530                                 rg_entry->fv_mask[i];
1531                         fv_word_idx++;
1532                 }
1533                 /* populate rg_list with the data from the child entry of this
1534                  * recipe
1535                  */
1536                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1537
1538                 /* Propagate some data to the recipe database */
1539                 recps[idx].is_root = !!is_root;
1540                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1541                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1542                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1543                         recps[idx].chain_idx = root_bufs.content.result_indx &
1544                                 ~ICE_AQ_RECIPE_RESULT_EN;
1545                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1546                 } else {
1547                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1548                 }
1549
1550                 if (!is_root)
1551                         continue;
1552
1553                 /* Only do the following for root recipes entries */
1554                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1555                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1556                 recps[idx].root_rid = root_bufs.content.rid &
1557                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
1558                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1559         }
1560
1561         /* Complete initialization of the root recipe entry */
1562         lkup_exts->n_val_words = fv_word_idx;
1563         recps[rid].big_recp = (num_recps > 1);
1564         recps[rid].n_grp_count = (u8)num_recps;
1565         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1566         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1567                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1568                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1569         if (!recps[rid].root_buf)
1570                 goto err_unroll;
1571
1572         /* Copy result indexes */
1573         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1574         recps[rid].recp_created = true;
1575
1576 err_unroll:
1577         ice_free(hw, tmp);
1578         return status;
1579 }
1580
1581 /**
1582  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1583  * @hw: pointer to hardware structure
1584  *
1585  * This function is used to populate recipe_to_profile matrix where index to
1586  * this array is the recipe ID and the element is the mapping of which profiles
1587  * is this recipe mapped to.
1588  */
1589 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1590 {
1591         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1592         u16 i;
1593
1594         for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1595                 u16 j;
1596
1597                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1598                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1599                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1600                         continue;
1601                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1602                               ICE_MAX_NUM_RECIPES);
1603                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1604                         ice_set_bit(i, recipe_to_profile[j]);
1605         }
1606 }
1607
1608 /**
1609  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1610  * @hw: pointer to the HW struct
1611  * @recp_list: pointer to sw recipe list
1612  *
1613  * Allocate memory for the entire recipe table and initialize the structures/
1614  * entries corresponding to basic recipes.
1615  */
1616 enum ice_status
1617 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1618 {
1619         struct ice_sw_recipe *recps;
1620         u8 i;
1621
1622         recps = (struct ice_sw_recipe *)
1623                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1624         if (!recps)
1625                 return ICE_ERR_NO_MEMORY;
1626
1627         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1628                 recps[i].root_rid = i;
1629                 INIT_LIST_HEAD(&recps[i].filt_rules);
1630                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1631                 INIT_LIST_HEAD(&recps[i].rg_list);
1632                 ice_init_lock(&recps[i].filt_rule_lock);
1633         }
1634
1635         *recp_list = recps;
1636
1637         return ICE_SUCCESS;
1638 }
1639
1640 /**
1641  * ice_aq_get_sw_cfg - get switch configuration
1642  * @hw: pointer to the hardware structure
1643  * @buf: pointer to the result buffer
1644  * @buf_size: length of the buffer available for response
1645  * @req_desc: pointer to requested descriptor
1646  * @num_elems: pointer to number of elements
1647  * @cd: pointer to command details structure or NULL
1648  *
1649  * Get switch configuration (0x0200) to be placed in buf.
1650  * This admin command returns information such as initial VSI/port number
1651  * and switch ID it belongs to.
1652  *
1653  * NOTE: *req_desc is both an input/output parameter.
1654  * The caller of this function first calls this function with *request_desc set
1655  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1656  * configuration information has been returned; if non-zero (meaning not all
1657  * the information was returned), the caller should call this function again
1658  * with *req_desc set to the previous value returned by f/w to get the
1659  * next block of switch configuration information.
1660  *
1661  * *num_elems is output only parameter. This reflects the number of elements
1662  * in response buffer. The caller of this function to use *num_elems while
1663  * parsing the response buffer.
1664  */
1665 static enum ice_status
1666 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1667                   u16 buf_size, u16 *req_desc, u16 *num_elems,
1668                   struct ice_sq_cd *cd)
1669 {
1670         struct ice_aqc_get_sw_cfg *cmd;
1671         struct ice_aq_desc desc;
1672         enum ice_status status;
1673
1674         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1675         cmd = &desc.params.get_sw_conf;
1676         cmd->element = CPU_TO_LE16(*req_desc);
1677
1678         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1679         if (!status) {
1680                 *req_desc = LE16_TO_CPU(cmd->element);
1681                 *num_elems = LE16_TO_CPU(cmd->num_elems);
1682         }
1683
1684         return status;
1685 }
1686
1687 /**
1688  * ice_alloc_sw - allocate resources specific to switch
1689  * @hw: pointer to the HW struct
1690  * @ena_stats: true to turn on VEB stats
1691  * @shared_res: true for shared resource, false for dedicated resource
1692  * @sw_id: switch ID returned
1693  * @counter_id: VEB counter ID returned
1694  *
1695  * allocates switch resources (SWID and VEB counter) (0x0208)
1696  */
1697 enum ice_status
1698 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1699              u16 *counter_id)
1700 {
1701         struct ice_aqc_alloc_free_res_elem *sw_buf;
1702         struct ice_aqc_res_elem *sw_ele;
1703         enum ice_status status;
1704         u16 buf_len;
1705
1706         buf_len = ice_struct_size(sw_buf, elem, 1);
1707         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1708         if (!sw_buf)
1709                 return ICE_ERR_NO_MEMORY;
1710
1711         /* Prepare buffer for switch ID.
1712          * The number of resource entries in buffer is passed as 1 since only a
1713          * single switch/VEB instance is allocated, and hence a single sw_id
1714          * is requested.
1715          */
1716         sw_buf->num_elems = CPU_TO_LE16(1);
1717         sw_buf->res_type =
1718                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1719                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1720                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1721
1722         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1723                                        ice_aqc_opc_alloc_res, NULL);
1724
1725         if (status)
1726                 goto ice_alloc_sw_exit;
1727
1728         sw_ele = &sw_buf->elem[0];
1729         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1730
1731         if (ena_stats) {
1732                 /* Prepare buffer for VEB Counter */
1733                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1734                 struct ice_aqc_alloc_free_res_elem *counter_buf;
1735                 struct ice_aqc_res_elem *counter_ele;
1736
1737                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1738                                 ice_malloc(hw, buf_len);
1739                 if (!counter_buf) {
1740                         status = ICE_ERR_NO_MEMORY;
1741                         goto ice_alloc_sw_exit;
1742                 }
1743
1744                 /* The number of resource entries in buffer is passed as 1 since
1745                  * only a single switch/VEB instance is allocated, and hence a
1746                  * single VEB counter is requested.
1747                  */
1748                 counter_buf->num_elems = CPU_TO_LE16(1);
1749                 counter_buf->res_type =
1750                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1751                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1752                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1753                                                opc, NULL);
1754
1755                 if (status) {
1756                         ice_free(hw, counter_buf);
1757                         goto ice_alloc_sw_exit;
1758                 }
1759                 counter_ele = &counter_buf->elem[0];
1760                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1761                 ice_free(hw, counter_buf);
1762         }
1763
1764 ice_alloc_sw_exit:
1765         ice_free(hw, sw_buf);
1766         return status;
1767 }
1768
1769 /**
1770  * ice_free_sw - free resources specific to switch
1771  * @hw: pointer to the HW struct
1772  * @sw_id: switch ID returned
1773  * @counter_id: VEB counter ID returned
1774  *
1775  * free switch resources (SWID and VEB counter) (0x0209)
1776  *
1777  * NOTE: This function frees multiple resources. It continues
1778  * releasing other resources even after it encounters error.
1779  * The error code returned is the last error it encountered.
1780  */
1781 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1782 {
1783         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1784         enum ice_status status, ret_status;
1785         u16 buf_len;
1786
1787         buf_len = ice_struct_size(sw_buf, elem, 1);
1788         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1789         if (!sw_buf)
1790                 return ICE_ERR_NO_MEMORY;
1791
1792         /* Prepare buffer to free for switch ID res.
1793          * The number of resource entries in buffer is passed as 1 since only a
1794          * single switch/VEB instance is freed, and hence a single sw_id
1795          * is released.
1796          */
1797         sw_buf->num_elems = CPU_TO_LE16(1);
1798         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1799         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1800
1801         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802                                            ice_aqc_opc_free_res, NULL);
1803
1804         if (ret_status)
1805                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1806
1807         /* Prepare buffer to free for VEB Counter resource */
1808         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1809                         ice_malloc(hw, buf_len);
1810         if (!counter_buf) {
1811                 ice_free(hw, sw_buf);
1812                 return ICE_ERR_NO_MEMORY;
1813         }
1814
1815         /* The number of resource entries in buffer is passed as 1 since only a
1816          * single switch/VEB instance is freed, and hence a single VEB counter
1817          * is released
1818          */
1819         counter_buf->num_elems = CPU_TO_LE16(1);
1820         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1821         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1822
1823         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1824                                        ice_aqc_opc_free_res, NULL);
1825         if (status) {
1826                 ice_debug(hw, ICE_DBG_SW,
1827                           "VEB counter resource could not be freed\n");
1828                 ret_status = status;
1829         }
1830
1831         ice_free(hw, counter_buf);
1832         ice_free(hw, sw_buf);
1833         return ret_status;
1834 }
1835
1836 /**
1837  * ice_aq_add_vsi
1838  * @hw: pointer to the HW struct
1839  * @vsi_ctx: pointer to a VSI context struct
1840  * @cd: pointer to command details structure or NULL
1841  *
1842  * Add a VSI context to the hardware (0x0210)
1843  */
1844 enum ice_status
1845 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1846                struct ice_sq_cd *cd)
1847 {
1848         struct ice_aqc_add_update_free_vsi_resp *res;
1849         struct ice_aqc_add_get_update_free_vsi *cmd;
1850         struct ice_aq_desc desc;
1851         enum ice_status status;
1852
1853         cmd = &desc.params.vsi_cmd;
1854         res = &desc.params.add_update_free_vsi_res;
1855
1856         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1857
1858         if (!vsi_ctx->alloc_from_pool)
1859                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1860                                            ICE_AQ_VSI_IS_VALID);
1861
1862         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1863
1864         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1865
1866         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1867                                  sizeof(vsi_ctx->info), cd);
1868
1869         if (!status) {
1870                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1871                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1872                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1873         }
1874
1875         return status;
1876 }
1877
1878 /**
1879  * ice_aq_free_vsi
1880  * @hw: pointer to the HW struct
1881  * @vsi_ctx: pointer to a VSI context struct
1882  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1883  * @cd: pointer to command details structure or NULL
1884  *
1885  * Free VSI context info from hardware (0x0213)
1886  */
1887 enum ice_status
1888 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1889                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1890 {
1891         struct ice_aqc_add_update_free_vsi_resp *resp;
1892         struct ice_aqc_add_get_update_free_vsi *cmd;
1893         struct ice_aq_desc desc;
1894         enum ice_status status;
1895
1896         cmd = &desc.params.vsi_cmd;
1897         resp = &desc.params.add_update_free_vsi_res;
1898
1899         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1900
1901         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1902         if (keep_vsi_alloc)
1903                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1904
1905         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1906         if (!status) {
1907                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1908                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1909         }
1910
1911         return status;
1912 }
1913
1914 /**
1915  * ice_aq_update_vsi
1916  * @hw: pointer to the HW struct
1917  * @vsi_ctx: pointer to a VSI context struct
1918  * @cd: pointer to command details structure or NULL
1919  *
1920  * Update VSI context in the hardware (0x0211)
1921  */
1922 enum ice_status
1923 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1924                   struct ice_sq_cd *cd)
1925 {
1926         struct ice_aqc_add_update_free_vsi_resp *resp;
1927         struct ice_aqc_add_get_update_free_vsi *cmd;
1928         struct ice_aq_desc desc;
1929         enum ice_status status;
1930
1931         cmd = &desc.params.vsi_cmd;
1932         resp = &desc.params.add_update_free_vsi_res;
1933
1934         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1935
1936         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1937
1938         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1939
1940         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1941                                  sizeof(vsi_ctx->info), cd);
1942
1943         if (!status) {
1944                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1945                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1946         }
1947
1948         return status;
1949 }
1950
1951 /**
1952  * ice_is_vsi_valid - check whether the VSI is valid or not
1953  * @hw: pointer to the HW struct
1954  * @vsi_handle: VSI handle
1955  *
1956  * check whether the VSI is valid or not
1957  */
1958 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1959 {
1960         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1961 }
1962
1963 /**
1964  * ice_get_hw_vsi_num - return the HW VSI number
1965  * @hw: pointer to the HW struct
1966  * @vsi_handle: VSI handle
1967  *
1968  * return the HW VSI number
1969  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1970  */
1971 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1972 {
1973         return hw->vsi_ctx[vsi_handle]->vsi_num;
1974 }
1975
1976 /**
1977  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1978  * @hw: pointer to the HW struct
1979  * @vsi_handle: VSI handle
1980  *
1981  * return the VSI context entry for a given VSI handle
1982  */
1983 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1984 {
1985         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1986 }
1987
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	/* Ownership of @vsi transfers to the table; the entry is released
	 * later via ice_clear_vsi_ctx(). NOTE(review): no bounds check here -
	 * callers are expected to have validated vsi_handle < ICE_MAX_VSI.
	 */
	hw->vsi_ctx[vsi_handle] = vsi;
}
2001
2002 /**
2003  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2004  * @hw: pointer to the HW struct
2005  * @vsi_handle: VSI handle
2006  */
2007 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2008 {
2009         struct ice_vsi_ctx *vsi;
2010         u8 i;
2011
2012         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2013         if (!vsi)
2014                 return;
2015         ice_for_each_traffic_class(i) {
2016                 if (vsi->lan_q_ctx[i]) {
2017                         ice_free(hw, vsi->lan_q_ctx[i]);
2018                         vsi->lan_q_ctx[i] = NULL;
2019                 }
2020         }
2021 }
2022
2023 /**
2024  * ice_clear_vsi_ctx - clear the VSI context entry
2025  * @hw: pointer to the HW struct
2026  * @vsi_handle: VSI handle
2027  *
2028  * clear the VSI context entry
2029  */
2030 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2031 {
2032         struct ice_vsi_ctx *vsi;
2033
2034         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2035         if (vsi) {
2036                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2037                 ice_free(hw, vsi);
2038                 hw->vsi_ctx[vsi_handle] = NULL;
2039         }
2040 }
2041
2042 /**
2043  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2044  * @hw: pointer to the HW struct
2045  */
2046 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2047 {
2048         u16 i;
2049
2050         for (i = 0; i < ICE_MAX_VSI; i++)
2051                 ice_clear_vsi_ctx(hw, i);
2052 }
2053
2054 /**
2055  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2056  * @hw: pointer to the HW struct
2057  * @vsi_handle: unique VSI handle provided by drivers
2058  * @vsi_ctx: pointer to a VSI context struct
2059  * @cd: pointer to command details structure or NULL
2060  *
2061  * Add a VSI context to the hardware also add it into the VSI handle list.
2062  * If this function gets called after reset for existing VSIs then update
2063  * with the new HW VSI number in the corresponding VSI handle list entry.
2064  */
2065 enum ice_status
2066 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2067             struct ice_sq_cd *cd)
2068 {
2069         struct ice_vsi_ctx *tmp_vsi_ctx;
2070         enum ice_status status;
2071
2072         if (vsi_handle >= ICE_MAX_VSI)
2073                 return ICE_ERR_PARAM;
2074         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2075         if (status)
2076                 return status;
2077         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2078         if (!tmp_vsi_ctx) {
2079                 /* Create a new VSI context */
2080                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2081                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2082                 if (!tmp_vsi_ctx) {
2083                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2084                         return ICE_ERR_NO_MEMORY;
2085                 }
2086                 *tmp_vsi_ctx = *vsi_ctx;
2087
2088                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2089         } else {
2090                 /* update with new HW VSI num */
2091                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2092         }
2093
2094         return ICE_SUCCESS;
2095 }
2096
2097 /**
2098  * ice_free_vsi- free VSI context from hardware and VSI handle list
2099  * @hw: pointer to the HW struct
2100  * @vsi_handle: unique VSI handle
2101  * @vsi_ctx: pointer to a VSI context struct
2102  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2103  * @cd: pointer to command details structure or NULL
2104  *
2105  * Free VSI context info from hardware as well as from VSI handle list
2106  */
2107 enum ice_status
2108 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2109              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2110 {
2111         enum ice_status status;
2112
2113         if (!ice_is_vsi_valid(hw, vsi_handle))
2114                 return ICE_ERR_PARAM;
2115         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2116         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2117         if (!status)
2118                 ice_clear_vsi_ctx(hw, vsi_handle);
2119         return status;
2120 }
2121
2122 /**
2123  * ice_update_vsi
2124  * @hw: pointer to the HW struct
2125  * @vsi_handle: unique VSI handle
2126  * @vsi_ctx: pointer to a VSI context struct
2127  * @cd: pointer to command details structure or NULL
2128  *
2129  * Update VSI context in the hardware
2130  */
2131 enum ice_status
2132 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2133                struct ice_sq_cd *cd)
2134 {
2135         if (!ice_is_vsi_valid(hw, vsi_handle))
2136                 return ICE_ERR_PARAM;
2137         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2138         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2139 }
2140
2141 /**
2142  * ice_aq_get_vsi_params
2143  * @hw: pointer to the HW struct
2144  * @vsi_ctx: pointer to a VSI context struct
2145  * @cd: pointer to command details structure or NULL
2146  *
2147  * Get VSI context info from hardware (0x0212)
2148  */
2149 enum ice_status
2150 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2151                       struct ice_sq_cd *cd)
2152 {
2153         struct ice_aqc_add_get_update_free_vsi *cmd;
2154         struct ice_aqc_get_vsi_resp *resp;
2155         struct ice_aq_desc desc;
2156         enum ice_status status;
2157
2158         cmd = &desc.params.vsi_cmd;
2159         resp = &desc.params.get_vsi_resp;
2160
2161         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2162
2163         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2164
2165         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2166                                  sizeof(vsi_ctx->info), cd);
2167         if (!status) {
2168                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2169                                         ICE_AQ_VSI_NUM_M;
2170                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2171                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2172         }
2173
2174         return status;
2175 }
2176
2177 /**
2178  * ice_aq_add_update_mir_rule - add/update a mirror rule
2179  * @hw: pointer to the HW struct
2180  * @rule_type: Rule Type
2181  * @dest_vsi: VSI number to which packets will be mirrored
2182  * @count: length of the list
2183  * @mr_buf: buffer for list of mirrored VSI numbers
2184  * @cd: pointer to command details structure or NULL
2185  * @rule_id: Rule ID
2186  *
2187  * Add/Update Mirror Rule (0x260).
2188  */
2189 enum ice_status
2190 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2191                            u16 count, struct ice_mir_rule_buf *mr_buf,
2192                            struct ice_sq_cd *cd, u16 *rule_id)
2193 {
2194         struct ice_aqc_add_update_mir_rule *cmd;
2195         struct ice_aq_desc desc;
2196         enum ice_status status;
2197         __le16 *mr_list = NULL;
2198         u16 buf_size = 0;
2199
2200         switch (rule_type) {
2201         case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2202         case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2203                 /* Make sure count and mr_buf are set for these rule_types */
2204                 if (!(count && mr_buf))
2205                         return ICE_ERR_PARAM;
2206
2207                 buf_size = count * sizeof(__le16);
2208                 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2209                 if (!mr_list)
2210                         return ICE_ERR_NO_MEMORY;
2211                 break;
2212         case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2213         case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2214                 /* Make sure count and mr_buf are not set for these
2215                  * rule_types
2216                  */
2217                 if (count || mr_buf)
2218                         return ICE_ERR_PARAM;
2219                 break;
2220         default:
2221                 ice_debug(hw, ICE_DBG_SW,
2222                           "Error due to unsupported rule_type %u\n", rule_type);
2223                 return ICE_ERR_OUT_OF_RANGE;
2224         }
2225
2226         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2227
2228         /* Pre-process 'mr_buf' items for add/update of virtual port
2229          * ingress/egress mirroring (but not physical port ingress/egress
2230          * mirroring)
2231          */
2232         if (mr_buf) {
2233                 int i;
2234
2235                 for (i = 0; i < count; i++) {
2236                         u16 id;
2237
2238                         id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2239
2240                         /* Validate specified VSI number, make sure it is less
2241                          * than ICE_MAX_VSI, if not return with error.
2242                          */
2243                         if (id >= ICE_MAX_VSI) {
2244                                 ice_debug(hw, ICE_DBG_SW,
2245                                           "Error VSI index (%u) out-of-range\n",
2246                                           id);
2247                                 ice_free(hw, mr_list);
2248                                 return ICE_ERR_OUT_OF_RANGE;
2249                         }
2250
2251                         /* add VSI to mirror rule */
2252                         if (mr_buf[i].add)
2253                                 mr_list[i] =
2254                                         CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2255                         else /* remove VSI from mirror rule */
2256                                 mr_list[i] = CPU_TO_LE16(id);
2257                 }
2258         }
2259
2260         cmd = &desc.params.add_update_rule;
2261         if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2262                 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2263                                            ICE_AQC_RULE_ID_VALID_M);
2264         cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2265         cmd->num_entries = CPU_TO_LE16(count);
2266         cmd->dest = CPU_TO_LE16(dest_vsi);
2267
2268         status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2269         if (!status)
2270                 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2271
2272         ice_free(hw, mr_list);
2273
2274         return status;
2275 }
2276
2277 /**
2278  * ice_aq_delete_mir_rule - delete a mirror rule
2279  * @hw: pointer to the HW struct
2280  * @rule_id: Mirror rule ID (to be deleted)
2281  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2282  *               otherwise it is returned to the shared pool
2283  * @cd: pointer to command details structure or NULL
2284  *
2285  * Delete Mirror Rule (0x261).
2286  */
2287 enum ice_status
2288 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2289                        struct ice_sq_cd *cd)
2290 {
2291         struct ice_aqc_delete_mir_rule *cmd;
2292         struct ice_aq_desc desc;
2293
2294         /* rule_id should be in the range 0...63 */
2295         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2296                 return ICE_ERR_OUT_OF_RANGE;
2297
2298         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2299
2300         cmd = &desc.params.del_rule;
2301         rule_id |= ICE_AQC_RULE_ID_VALID_M;
2302         cmd->rule_id = CPU_TO_LE16(rule_id);
2303
2304         if (keep_allocd)
2305                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2306
2307         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2308 }
2309
2310 /**
2311  * ice_aq_alloc_free_vsi_list
2312  * @hw: pointer to the HW struct
2313  * @vsi_list_id: VSI list ID returned or used for lookup
2314  * @lkup_type: switch rule filter lookup type
2315  * @opc: switch rules population command type - pass in the command opcode
2316  *
2317  * allocates or free a VSI list resource
2318  */
2319 static enum ice_status
2320 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2321                            enum ice_sw_lkup_type lkup_type,
2322                            enum ice_adminq_opc opc)
2323 {
2324         struct ice_aqc_alloc_free_res_elem *sw_buf;
2325         struct ice_aqc_res_elem *vsi_ele;
2326         enum ice_status status;
2327         u16 buf_len;
2328
2329         buf_len = ice_struct_size(sw_buf, elem, 1);
2330         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2331         if (!sw_buf)
2332                 return ICE_ERR_NO_MEMORY;
2333         sw_buf->num_elems = CPU_TO_LE16(1);
2334
2335         if (lkup_type == ICE_SW_LKUP_MAC ||
2336             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2337             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2338             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2339             lkup_type == ICE_SW_LKUP_PROMISC ||
2340             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2341             lkup_type == ICE_SW_LKUP_LAST) {
2342                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2343         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2344                 sw_buf->res_type =
2345                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2346         } else {
2347                 status = ICE_ERR_PARAM;
2348                 goto ice_aq_alloc_free_vsi_list_exit;
2349         }
2350
2351         if (opc == ice_aqc_opc_free_res)
2352                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2353
2354         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2355         if (status)
2356                 goto ice_aq_alloc_free_vsi_list_exit;
2357
2358         if (opc == ice_aqc_opc_alloc_res) {
2359                 vsi_ele = &sw_buf->elem[0];
2360                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2361         }
2362
2363 ice_aq_alloc_free_vsi_list_exit:
2364         ice_free(hw, sw_buf);
2365         return status;
2366 }
2367
2368 /**
2369  * ice_aq_set_storm_ctrl - Sets storm control configuration
2370  * @hw: pointer to the HW struct
2371  * @bcast_thresh: represents the upper threshold for broadcast storm control
2372  * @mcast_thresh: represents the upper threshold for multicast storm control
2373  * @ctl_bitmask: storm control control knobs
2374  *
2375  * Sets the storm control configuration (0x0280)
2376  */
2377 enum ice_status
2378 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2379                       u32 ctl_bitmask)
2380 {
2381         struct ice_aqc_storm_cfg *cmd;
2382         struct ice_aq_desc desc;
2383
2384         cmd = &desc.params.storm_conf;
2385
2386         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2387
2388         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2389         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2390         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2391
2392         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2393 }
2394
2395 /**
2396  * ice_aq_get_storm_ctrl - gets storm control configuration
2397  * @hw: pointer to the HW struct
2398  * @bcast_thresh: represents the upper threshold for broadcast storm control
2399  * @mcast_thresh: represents the upper threshold for multicast storm control
2400  * @ctl_bitmask: storm control control knobs
2401  *
2402  * Gets the storm control configuration (0x0281)
2403  */
2404 enum ice_status
2405 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2406                       u32 *ctl_bitmask)
2407 {
2408         enum ice_status status;
2409         struct ice_aq_desc desc;
2410
2411         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2412
2413         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2414         if (!status) {
2415                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2416
2417                 if (bcast_thresh)
2418                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2419                                 ICE_AQ_THRESHOLD_M;
2420                 if (mcast_thresh)
2421                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2422                                 ICE_AQ_THRESHOLD_M;
2423                 if (ctl_bitmask)
2424                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2425         }
2426
2427         return status;
2428 }
2429
2430 /**
2431  * ice_aq_sw_rules - add/update/remove switch rules
2432  * @hw: pointer to the HW struct
2433  * @rule_list: pointer to switch rule population list
2434  * @rule_list_sz: total size of the rule list in bytes
2435  * @num_rules: number of switch rules in the rule_list
2436  * @opc: switch rules population command type - pass in the command opcode
2437  * @cd: pointer to command details structure or NULL
2438  *
2439  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2440  */
2441 static enum ice_status
2442 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2443                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2444 {
2445         struct ice_aq_desc desc;
2446         enum ice_status status;
2447
2448         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2449
2450         if (opc != ice_aqc_opc_add_sw_rules &&
2451             opc != ice_aqc_opc_update_sw_rules &&
2452             opc != ice_aqc_opc_remove_sw_rules)
2453                 return ICE_ERR_PARAM;
2454
2455         ice_fill_dflt_direct_cmd_desc(&desc, opc);
2456
2457         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2458         desc.params.sw_rules.num_rules_fltr_entry_index =
2459                 CPU_TO_LE16(num_rules);
2460         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2461         if (opc != ice_aqc_opc_add_sw_rules &&
2462             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2463                 status = ICE_ERR_DOES_NOT_EXIST;
2464
2465         return status;
2466 }
2467
2468 /**
2469  * ice_aq_add_recipe - add switch recipe
2470  * @hw: pointer to the HW struct
2471  * @s_recipe_list: pointer to switch rule population list
2472  * @num_recipes: number of switch recipes in the list
2473  * @cd: pointer to command details structure or NULL
2474  *
2475  * Add(0x0290)
2476  */
2477 enum ice_status
2478 ice_aq_add_recipe(struct ice_hw *hw,
2479                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2480                   u16 num_recipes, struct ice_sq_cd *cd)
2481 {
2482         struct ice_aqc_add_get_recipe *cmd;
2483         struct ice_aq_desc desc;
2484         u16 buf_size;
2485
2486         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2487         cmd = &desc.params.add_get_recipe;
2488         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2489
2490         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2491         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2492
2493         buf_size = num_recipes * sizeof(*s_recipe_list);
2494
2495         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2496 }
2497
2498 /**
2499  * ice_aq_get_recipe - get switch recipe
2500  * @hw: pointer to the HW struct
2501  * @s_recipe_list: pointer to switch rule population list
2502  * @num_recipes: pointer to the number of recipes (input and output)
2503  * @recipe_root: root recipe number of recipe(s) to retrieve
2504  * @cd: pointer to command details structure or NULL
2505  *
2506  * Get(0x0292)
2507  *
2508  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2509  * On output, *num_recipes will equal the number of entries returned in
2510  * s_recipe_list.
2511  *
2512  * The caller must supply enough space in s_recipe_list to hold all possible
2513  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2514  */
2515 enum ice_status
2516 ice_aq_get_recipe(struct ice_hw *hw,
2517                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2518                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2519 {
2520         struct ice_aqc_add_get_recipe *cmd;
2521         struct ice_aq_desc desc;
2522         enum ice_status status;
2523         u16 buf_size;
2524
2525         if (*num_recipes != ICE_MAX_NUM_RECIPES)
2526                 return ICE_ERR_PARAM;
2527
2528         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2529         cmd = &desc.params.add_get_recipe;
2530         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2531
2532         cmd->return_index = CPU_TO_LE16(recipe_root);
2533         cmd->num_sub_recipes = 0;
2534
2535         buf_size = *num_recipes * sizeof(*s_recipe_list);
2536
2537         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2538         /* cppcheck-suppress constArgument */
2539         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2540
2541         return status;
2542 }
2543
2544 /**
2545  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2546  * @hw: pointer to the HW struct
2547  * @profile_id: package profile ID to associate the recipe with
2548  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2549  * @cd: pointer to command details structure or NULL
2550  * Recipe to profile association (0x0291)
2551  */
2552 enum ice_status
2553 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2554                              struct ice_sq_cd *cd)
2555 {
2556         struct ice_aqc_recipe_to_profile *cmd;
2557         struct ice_aq_desc desc;
2558
2559         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2560         cmd = &desc.params.recipe_to_profile;
2561         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2562         cmd->profile_id = CPU_TO_LE16(profile_id);
2563         /* Set the recipe ID bit in the bitmask to let the device know which
2564          * profile we are associating the recipe to
2565          */
2566         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2567                    ICE_NONDMA_TO_NONDMA);
2568
2569         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2570 }
2571
2572 /**
2573  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2574  * @hw: pointer to the HW struct
2575  * @profile_id: package profile ID to associate the recipe with
2576  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2577  * @cd: pointer to command details structure or NULL
2578  * Associate profile ID with given recipe (0x0293)
2579  */
2580 enum ice_status
2581 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2582                              struct ice_sq_cd *cd)
2583 {
2584         struct ice_aqc_recipe_to_profile *cmd;
2585         struct ice_aq_desc desc;
2586         enum ice_status status;
2587
2588         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2589         cmd = &desc.params.recipe_to_profile;
2590         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2591         cmd->profile_id = CPU_TO_LE16(profile_id);
2592
2593         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2594         if (!status)
2595                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2596                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2597
2598         return status;
2599 }
2600
2601 /**
2602  * ice_alloc_recipe - add recipe resource
2603  * @hw: pointer to the hardware structure
2604  * @rid: recipe ID returned as response to AQ call
2605  */
2606 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2607 {
2608         struct ice_aqc_alloc_free_res_elem *sw_buf;
2609         enum ice_status status;
2610         u16 buf_len;
2611
2612         buf_len = ice_struct_size(sw_buf, elem, 1);
2613         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2614         if (!sw_buf)
2615                 return ICE_ERR_NO_MEMORY;
2616
2617         sw_buf->num_elems = CPU_TO_LE16(1);
2618         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2619                                         ICE_AQC_RES_TYPE_S) |
2620                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
2621         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2622                                        ice_aqc_opc_alloc_res, NULL);
2623         if (!status)
2624                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2625         ice_free(hw, sw_buf);
2626
2627         return status;
2628 }
2629
2630 /* ice_init_port_info - Initialize port_info with switch configuration data
2631  * @pi: pointer to port_info
2632  * @vsi_port_num: VSI number or port number
2633  * @type: Type of switch element (port or VSI)
2634  * @swid: switch ID of the switch the element is attached to
2635  * @pf_vf_num: PF or VF number
2636  * @is_vf: true if the element is a VF, false otherwise
2637  */
2638 static void
2639 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2640                    u16 swid, u16 pf_vf_num, bool is_vf)
2641 {
2642         switch (type) {
2643         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2644                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2645                 pi->sw_id = swid;
2646                 pi->pf_vf_num = pf_vf_num;
2647                 pi->is_vf = is_vf;
2648                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2649                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2650                 break;
2651         default:
2652                 ice_debug(pi->hw, ICE_DBG_SW,
2653                           "incorrect VSI/port type received\n");
2654                 break;
2655         }
2656 }
2657
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the switch configuration from firmware (possibly across several
 * AQ calls) and initializes hw->port_info for each physical/virtual port
 * element found. Fails with ICE_ERR_CFG if firmware reports more port
 * elements than expected (num_total_ports).
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0;	/* count of port elements consumed so far */
	u16 i;

	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			/* element type is encoded in the upper bits of
			 * vsi_port_num
			 */
			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW,
						  "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* non-port elements are ignored here */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, (void *)rbuf);
	return status;
}
2737
2738 /**
2739  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2740  * @hw: pointer to the hardware structure
2741  * @fi: filter info structure to fill/update
2742  *
2743  * This helper function populates the lb_en and lan_en elements of the provided
2744  * ice_fltr_info struct using the switch's type and characteristics of the
2745  * switch rule being configured.
2746  */
2747 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2748 {
2749         if ((fi->flag & ICE_FLTR_RX) &&
2750             (fi->fltr_act == ICE_FWD_TO_VSI ||
2751              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2752             fi->lkup_type == ICE_SW_LKUP_LAST)
2753                 fi->lan_en = true;
2754         fi->lb_en = false;
2755         fi->lan_en = false;
2756         if ((fi->flag & ICE_FLTR_TX) &&
2757             (fi->fltr_act == ICE_FWD_TO_VSI ||
2758              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2759              fi->fltr_act == ICE_FWD_TO_Q ||
2760              fi->fltr_act == ICE_FWD_TO_QGRP)) {
2761                 /* Setting LB for prune actions will result in replicated
2762                  * packets to the internal switch that will be dropped.
2763                  */
2764                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2765                         fi->lb_en = true;
2766
2767                 /* Set lan_en to TRUE if
2768                  * 1. The switch is a VEB AND
2769                  * 2
2770                  * 2.1 The lookup is a directional lookup like ethertype,
2771                  * promiscuous, ethertype-MAC, promiscuous-VLAN
2772                  * and default-port OR
2773                  * 2.2 The lookup is VLAN, OR
2774                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2775                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2776                  *
2777                  * OR
2778                  *
2779                  * The switch is a VEPA.
2780                  *
2781                  * In all other cases, the LAN enable has to be set to false.
2782                  */
2783                 if (hw->evb_veb) {
2784                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2785                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2786                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2787                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2788                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
2789                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
2790                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
2791                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2792                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2793                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2794                                 fi->lan_en = true;
2795                 } else {
2796                         fi->lan_en = true;
2797                 }
2798         }
2799 }
2800
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Populates the lookup Tx/Rx payload of @s_rule: action word, recipe ID,
 * source, and a dummy Ethernet header patched with the filter's MAC, VLAN,
 * or ethertype data. For the remove opcode only the existing rule's index
 * is needed, so everything else is zeroed.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;	/* sentinel: no VLAN to program */
	void *daddr = NULL;	/* MAC to write into the dummy DA field, if any */
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;	/* accumulated single-action bits for the rule */
	__be16 *off;
	u8 q_rgn;

	/* Removal identifies the rule purely by its index */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
	ice_fill_sw_info(hw, f_info);	/* derive lb_en/lan_en for this rule */

	/* Encode the forwarding action into the action word */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is expressed as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ice_ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave s_rule unfilled */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick up the match data (MAC/VLAN/ethertype) per lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	if (daddr)
		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
			   ICE_NONDMA_TO_NONDMA);

	/* vlan_id still at its sentinel means no VLAN TCI to program */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = CPU_TO_BE16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
2934
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* markers are only supported on plain MAC lookups */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule starts right after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	/* submit both rules in a single AQ call */
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker association on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
3039
/**
 * ice_add_counter_act - add/update filter rule with counter action
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which counter needs to be added
 * @counter_id: VLAN counter ID returned as part of allocate resource
 * @l_id: large action resource ID
 *
 * Builds a 2-action large action (forward + statistics counter) and updates
 * the existing switch rule in @m_ent to point at it.
 */
static enum ice_status
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		    u16 counter_id, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act;
	struct ice_aqc_sw_rules_elem *rx_tx;
	enum ice_status status;
	/* 2 actions will be added while adding a large action counter */
	const int num_acts = 2;
	u16 lg_act_size;
	u16 rules_size;
	u16 f_rule_id;
	u32 act;
	u16 id;

	/* counters are only supported on plain MAC lookups */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
								 rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule starts right after the large action in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)
		((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action counter ID */
	act = ICE_LG_ACT_STAT_COUNT;
	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
		ICE_LG_ACT_STAT_COUNT_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* point the lookup rule at the large action just built */
	act = ICE_SINGLE_ACT_PTR;
	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	f_rule_id = m_ent->fltr_info.fltr_rule_id;
	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);

	/* submit both rules in a single AQ call */
	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the counter association on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->counter_index = counter_id;
	}

	ice_free(hw, lg_act);
	return status;
}
3129
3130 /**
3131  * ice_create_vsi_list_map
3132  * @hw: pointer to the hardware structure
3133  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3134  * @num_vsi: number of VSI handles in the array
3135  * @vsi_list_id: VSI list ID generated as part of allocate resource
3136  *
3137  * Helper function to create a new entry of VSI list ID to VSI mapping
3138  * using the given VSI list ID
3139  */
3140 static struct ice_vsi_list_map_info *
3141 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3142                         u16 vsi_list_id)
3143 {
3144         struct ice_switch_info *sw = hw->switch_info;
3145         struct ice_vsi_list_map_info *v_map;
3146         int i;
3147
3148         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3149                 sizeof(*v_map));
3150         if (!v_map)
3151                 return NULL;
3152
3153         v_map->vsi_list_id = vsi_list_id;
3154         v_map->ref_cnt = 1;
3155         for (i = 0; i < num_vsi; i++)
3156                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3157
3158         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3159         return v_map;
3160 }
3161
3162 /**
3163  * ice_update_vsi_list_rule
3164  * @hw: pointer to the hardware structure
3165  * @vsi_handle_arr: array of VSI handles to form a VSI list
3166  * @num_vsi: number of VSI handles in the array
3167  * @vsi_list_id: VSI list ID generated as part of allocate resource
3168  * @remove: Boolean value to indicate if this is a remove action
3169  * @opc: switch rules population command type - pass in the command opcode
3170  * @lkup_type: lookup type of the filter
3171  *
3172  * Call AQ command to add a new switch rule or update existing switch rule
3173  * using the given VSI list ID
3174  */
3175 static enum ice_status
3176 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3177                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3178                          enum ice_sw_lkup_type lkup_type)
3179 {
3180         struct ice_aqc_sw_rules_elem *s_rule;
3181         enum ice_status status;
3182         u16 s_rule_size;
3183         u16 rule_type;
3184         int i;
3185
3186         if (!num_vsi)
3187                 return ICE_ERR_PARAM;
3188
3189         if (lkup_type == ICE_SW_LKUP_MAC ||
3190             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3191             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3192             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3193             lkup_type == ICE_SW_LKUP_PROMISC ||
3194             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3195             lkup_type == ICE_SW_LKUP_LAST)
3196                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3197                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3198         else if (lkup_type == ICE_SW_LKUP_VLAN)
3199                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3200                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3201         else
3202                 return ICE_ERR_PARAM;
3203
3204         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3205         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3206         if (!s_rule)
3207                 return ICE_ERR_NO_MEMORY;
3208         for (i = 0; i < num_vsi; i++) {
3209                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3210                         status = ICE_ERR_PARAM;
3211                         goto exit;
3212                 }
3213                 /* AQ call requires hw_vsi_id(s) */
3214                 s_rule->pdata.vsi_list.vsi[i] =
3215                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3216         }
3217
3218         s_rule->type = CPU_TO_LE16(rule_type);
3219         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3220         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3221
3222         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3223
3224 exit:
3225         ice_free(hw, s_rule);
3226         return status;
3227 }
3228
3229 /**
3230  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3231  * @hw: pointer to the HW struct
3232  * @vsi_handle_arr: array of VSI handles to form a VSI list
3233  * @num_vsi: number of VSI handles in the array
3234  * @vsi_list_id: stores the ID of the VSI list to be created
3235  * @lkup_type: switch rule filter's lookup type
3236  */
3237 static enum ice_status
3238 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3239                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3240 {
3241         enum ice_status status;
3242
3243         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3244                                             ice_aqc_opc_alloc_res);
3245         if (status)
3246                 return status;
3247
3248         /* Update the newly created VSI list to include the specified VSIs */
3249         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3250                                         *vsi_list_id, false,
3251                                         ice_aqc_opc_add_sw_rules, lkup_type);
3252 }
3253
3254 /**
3255  * ice_create_pkt_fwd_rule
3256  * @hw: pointer to the hardware structure
3257  * @recp_list: corresponding filter management list
3258  * @f_entry: entry containing packet forwarding information
3259  *
3260  * Create switch rule with given filter information and add an entry
3261  * to the corresponding filter management list to track this switch rule
3262  * and VSI mapping
3263  */
3264 static enum ice_status
3265 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3266                         struct ice_fltr_list_entry *f_entry)
3267 {
3268         struct ice_fltr_mgmt_list_entry *fm_entry;
3269         struct ice_aqc_sw_rules_elem *s_rule;
3270         enum ice_status status;
3271
3272         s_rule = (struct ice_aqc_sw_rules_elem *)
3273                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3274         if (!s_rule)
3275                 return ICE_ERR_NO_MEMORY;
3276         fm_entry = (struct ice_fltr_mgmt_list_entry *)
3277                    ice_malloc(hw, sizeof(*fm_entry));
3278         if (!fm_entry) {
3279                 status = ICE_ERR_NO_MEMORY;
3280                 goto ice_create_pkt_fwd_rule_exit;
3281         }
3282
3283         fm_entry->fltr_info = f_entry->fltr_info;
3284
3285         /* Initialize all the fields for the management entry */
3286         fm_entry->vsi_count = 1;
3287         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3288         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3289         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3290
3291         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3292                          ice_aqc_opc_add_sw_rules);
3293
3294         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3295                                  ice_aqc_opc_add_sw_rules, NULL);
3296         if (status) {
3297                 ice_free(hw, fm_entry);
3298                 goto ice_create_pkt_fwd_rule_exit;
3299         }
3300
3301         f_entry->fltr_info.fltr_rule_id =
3302                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3303         fm_entry->fltr_info.fltr_rule_id =
3304                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3305
3306         /* The book keeping entries will get removed when base driver
3307          * calls remove filter AQ command
3308          */
3309         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3310
3311 ice_create_pkt_fwd_rule_exit:
3312         ice_free(hw, s_rule);
3313         return status;
3314 }
3315
3316 /**
3317  * ice_update_pkt_fwd_rule
3318  * @hw: pointer to the hardware structure
3319  * @f_info: filter information for switch rule
3320  *
3321  * Call AQ command to update a previously created switch rule with a
3322  * VSI list ID
3323  */
3324 static enum ice_status
3325 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3326 {
3327         struct ice_aqc_sw_rules_elem *s_rule;
3328         enum ice_status status;
3329
3330         s_rule = (struct ice_aqc_sw_rules_elem *)
3331                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3332         if (!s_rule)
3333                 return ICE_ERR_NO_MEMORY;
3334
3335         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3336
3337         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3338
3339         /* Update switch rule with new rule set to forward VSI list */
3340         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3341                                  ice_aqc_opc_update_sw_rules, NULL);
3342
3343         ice_free(hw, s_rule);
3344         return status;
3345 }
3346
3347 /**
3348  * ice_update_sw_rule_bridge_mode
3349  * @hw: pointer to the HW struct
3350  *
3351  * Updates unicast switch filter rules based on VEB/VEPA mode
3352  */
3353 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3354 {
3355         struct ice_switch_info *sw = hw->switch_info;
3356         struct ice_fltr_mgmt_list_entry *fm_entry;
3357         enum ice_status status = ICE_SUCCESS;
3358         struct LIST_HEAD_TYPE *rule_head;
3359         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3360
3361         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3362         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3363
3364         ice_acquire_lock(rule_lock);
3365         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3366                             list_entry) {
3367                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3368                 u8 *addr = fi->l_data.mac.mac_addr;
3369
3370                 /* Update unicast Tx rules to reflect the selected
3371                  * VEB/VEPA mode
3372                  */
3373                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3374                     (fi->fltr_act == ICE_FWD_TO_VSI ||
3375                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3376                      fi->fltr_act == ICE_FWD_TO_Q ||
3377                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
3378                         status = ice_update_pkt_fwd_rule(hw, fi);
3379                         if (status)
3380                                 break;
3381                 }
3382         }
3383
3384         ice_release_lock(rule_lock);
3385
3386         return status;
3387 }
3388
3389 /**
3390  * ice_add_update_vsi_list
3391  * @hw: pointer to the hardware structure
3392  * @m_entry: pointer to current filter management list entry
3393  * @cur_fltr: filter information from the book keeping entry
3394  * @new_fltr: filter information with the new VSI to be added
3395  *
3396  * Call AQ command to add or update previously created VSI list with new VSI.
3397  *
3398  * Helper function to do book keeping associated with adding filter information
3399  * The algorithm to do the book keeping is described below :
3400  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3401  *      if only one VSI has been added till now
3402  *              Allocate a new VSI list and add two VSIs
3403  *              to this list using switch rule command
3404  *              Update the previously created switch rule with the
3405  *              newly created VSI list ID
3406  *      if a VSI list was previously created
3407  *              Add the new VSI to the previously created VSI list set
3408  *              using the update switch rule command
3409  */
3410 static enum ice_status
3411 ice_add_update_vsi_list(struct ice_hw *hw,
3412                         struct ice_fltr_mgmt_list_entry *m_entry,
3413                         struct ice_fltr_info *cur_fltr,
3414                         struct ice_fltr_info *new_fltr)
3415 {
3416         enum ice_status status = ICE_SUCCESS;
3417         u16 vsi_list_id = 0;
3418
3419         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3420              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3421                 return ICE_ERR_NOT_IMPL;
3422
3423         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3424              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3425             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3426              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3427                 return ICE_ERR_NOT_IMPL;
3428
3429         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3430                 /* Only one entry existed in the mapping and it was not already
3431                  * a part of a VSI list. So, create a VSI list with the old and
3432                  * new VSIs.
3433                  */
3434                 struct ice_fltr_info tmp_fltr;
3435                 u16 vsi_handle_arr[2];
3436
3437                 /* A rule already exists with the new VSI being added */
3438                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3439                         return ICE_ERR_ALREADY_EXISTS;
3440
3441                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3442                 vsi_handle_arr[1] = new_fltr->vsi_handle;
3443                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3444                                                   &vsi_list_id,
3445                                                   new_fltr->lkup_type);
3446                 if (status)
3447                         return status;
3448
3449                 tmp_fltr = *new_fltr;
3450                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3451                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3452                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3453                 /* Update the previous switch rule of "MAC forward to VSI" to
3454                  * "MAC fwd to VSI list"
3455                  */
3456                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3457                 if (status)
3458                         return status;
3459
3460                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3461                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3462                 m_entry->vsi_list_info =
3463                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3464                                                 vsi_list_id);
3465
3466                 /* If this entry was large action then the large action needs
3467                  * to be updated to point to FWD to VSI list
3468                  */
3469                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3470                         status =
3471                             ice_add_marker_act(hw, m_entry,
3472                                                m_entry->sw_marker_id,
3473                                                m_entry->lg_act_idx);
3474         } else {
3475                 u16 vsi_handle = new_fltr->vsi_handle;
3476                 enum ice_adminq_opc opcode;
3477
3478                 if (!m_entry->vsi_list_info)
3479                         return ICE_ERR_CFG;
3480
3481                 /* A rule already exists with the new VSI being added */
3482                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3483                         return ICE_SUCCESS;
3484
3485                 /* Update the previously created VSI list set with
3486                  * the new VSI ID passed in
3487                  */
3488                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3489                 opcode = ice_aqc_opc_update_sw_rules;
3490
3491                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3492                                                   vsi_list_id, false, opcode,
3493                                                   new_fltr->lkup_type);
3494                 /* update VSI list mapping info with new VSI ID */
3495                 if (!status)
3496                         ice_set_bit(vsi_handle,
3497                                     m_entry->vsi_list_info->vsi_map);
3498         }
3499         if (!status)
3500                 m_entry->vsi_count++;
3501         return status;
3502 }
3503
3504 /**
3505  * ice_find_rule_entry - Search a rule entry
3506  * @list_head: head of rule list
3507  * @f_info: rule information
3508  *
3509  * Helper function to search for a given rule entry
3510  * Returns pointer to entry storing the rule if found
3511  */
3512 static struct ice_fltr_mgmt_list_entry *
3513 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3514                     struct ice_fltr_info *f_info)
3515 {
3516         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3517
3518         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3519                             list_entry) {
3520                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3521                             sizeof(f_info->l_data)) &&
3522                     f_info->flag == list_itr->fltr_info.flag) {
3523                         ret = list_itr;
3524                         break;
3525                 }
3526         }
3527         return ret;
3528 }
3529
3530 /**
3531  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3532  * @recp_list: VSI lists needs to be searched
3533  * @vsi_handle: VSI handle to be found in VSI list
3534  * @vsi_list_id: VSI list ID found containing vsi_handle
3535  *
3536  * Helper function to search a VSI list with single entry containing given VSI
3537  * handle element. This can be extended further to search VSI list with more
3538  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3539  */
3540 static struct ice_vsi_list_map_info *
3541 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3542                         u16 *vsi_list_id)
3543 {
3544         struct ice_vsi_list_map_info *map_info = NULL;
3545         struct LIST_HEAD_TYPE *list_head;
3546
3547         list_head = &recp_list->filt_rules;
3548         if (recp_list->adv_rule) {
3549                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
3550
3551                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3552                                     ice_adv_fltr_mgmt_list_entry,
3553                                     list_entry) {
3554                         if (list_itr->vsi_list_info) {
3555                                 map_info = list_itr->vsi_list_info;
3556                                 if (ice_is_bit_set(map_info->vsi_map,
3557                                                    vsi_handle)) {
3558                                         *vsi_list_id = map_info->vsi_list_id;
3559                                         return map_info;
3560                                 }
3561                         }
3562                 }
3563         } else {
3564                 struct ice_fltr_mgmt_list_entry *list_itr;
3565
3566                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
3567                                     ice_fltr_mgmt_list_entry,
3568                                     list_entry) {
3569                         if (list_itr->vsi_count == 1 &&
3570                             list_itr->vsi_list_info) {
3571                                 map_info = list_itr->vsi_list_info;
3572                                 if (ice_is_bit_set(map_info->vsi_map,
3573                                                    vsi_handle)) {
3574                                         *vsi_list_id = map_info->vsi_list_id;
3575                                         return map_info;
3576                                 }
3577                         }
3578                 }
3579         }
3580         return NULL;
3581 }
3582
3583 /**
3584  * ice_add_rule_internal - add rule for a given lookup type
3585  * @hw: pointer to the hardware structure
3586  * @recp_list: recipe list for which rule has to be added
3587  * @lport: logic port number on which function add rule
3588  * @f_entry: structure containing MAC forwarding information
3589  *
3590  * Adds or updates the rule lists for a given recipe
3591  */
3592 static enum ice_status
3593 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3594                       u8 lport, struct ice_fltr_list_entry *f_entry)
3595 {
3596         struct ice_fltr_info *new_fltr, *cur_fltr;
3597         struct ice_fltr_mgmt_list_entry *m_entry;
3598         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3599         enum ice_status status = ICE_SUCCESS;
3600
3601         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3602                 return ICE_ERR_PARAM;
3603
3604         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3605         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3606                 f_entry->fltr_info.fwd_id.hw_vsi_id =
3607                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3608
3609         rule_lock = &recp_list->filt_rule_lock;
3610
3611         ice_acquire_lock(rule_lock);
3612         new_fltr = &f_entry->fltr_info;
3613         if (new_fltr->flag & ICE_FLTR_RX)
3614                 new_fltr->src = lport;
3615         else if (new_fltr->flag & ICE_FLTR_TX)
3616                 new_fltr->src =
3617                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3618
3619         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3620         if (!m_entry) {
3621                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3622                 goto exit_add_rule_internal;
3623         }
3624
3625         cur_fltr = &m_entry->fltr_info;
3626         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3627
3628 exit_add_rule_internal:
3629         ice_release_lock(rule_lock);
3630         return status;
3631 }
3632
3633 /**
3634  * ice_remove_vsi_list_rule
3635  * @hw: pointer to the hardware structure
3636  * @vsi_list_id: VSI list ID generated as part of allocate resource
3637  * @lkup_type: switch rule filter lookup type
3638  *
3639  * The VSI list should be emptied before this function is called to remove the
3640  * VSI list.
3641  */
3642 static enum ice_status
3643 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3644                          enum ice_sw_lkup_type lkup_type)
3645 {
3646         /* Free the vsi_list resource that we allocated. It is assumed that the
3647          * list is empty at this point.
3648          */
3649         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3650                                             ice_aqc_opc_free_res);
3651 }
3652
3653 /**
3654  * ice_rem_update_vsi_list
3655  * @hw: pointer to the hardware structure
3656  * @vsi_handle: VSI handle of the VSI to remove
3657  * @fm_list: filter management entry for which the VSI list management needs to
3658  *           be done
3659  */
3660 static enum ice_status
3661 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3662                         struct ice_fltr_mgmt_list_entry *fm_list)
3663 {
3664         enum ice_sw_lkup_type lkup_type;
3665         enum ice_status status = ICE_SUCCESS;
3666         u16 vsi_list_id;
3667
3668         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3669             fm_list->vsi_count == 0)
3670                 return ICE_ERR_PARAM;
3671
3672         /* A rule with the VSI being removed does not exist */
3673         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3674                 return ICE_ERR_DOES_NOT_EXIST;
3675
3676         lkup_type = fm_list->fltr_info.lkup_type;
3677         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3678         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3679                                           ice_aqc_opc_update_sw_rules,
3680                                           lkup_type);
3681         if (status)
3682                 return status;
3683
3684         fm_list->vsi_count--;
3685         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3686
3687         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3688                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3689                 struct ice_vsi_list_map_info *vsi_list_info =
3690                         fm_list->vsi_list_info;
3691                 u16 rem_vsi_handle;
3692
3693                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3694                                                     ICE_MAX_VSI);
3695                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3696                         return ICE_ERR_OUT_OF_RANGE;
3697
3698                 /* Make sure VSI list is empty before removing it below */
3699                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3700                                                   vsi_list_id, true,
3701                                                   ice_aqc_opc_update_sw_rules,
3702                                                   lkup_type);
3703                 if (status)
3704                         return status;
3705
3706                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3707                 tmp_fltr_info.fwd_id.hw_vsi_id =
3708                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
3709                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3710                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3711                 if (status) {
3712                         ice_debug(hw, ICE_DBG_SW,
3713                                   "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3714                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
3715                         return status;
3716                 }
3717
3718                 fm_list->fltr_info = tmp_fltr_info;
3719         }
3720
3721         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3722             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3723                 struct ice_vsi_list_map_info *vsi_list_info =
3724                         fm_list->vsi_list_info;
3725
3726                 /* Remove the VSI list since it is no longer used */
3727                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3728                 if (status) {
3729                         ice_debug(hw, ICE_DBG_SW,
3730                                   "Failed to remove VSI list %d, error %d\n",
3731                                   vsi_list_id, status);
3732                         return status;
3733                 }
3734
3735                 LIST_DEL(&vsi_list_info->list_entry);
3736                 ice_free(hw, vsi_list_info);
3737                 fm_list->vsi_list_info = NULL;
3738         }
3739
3740         return status;
3741 }
3742
3743 /**
3744  * ice_remove_rule_internal - Remove a filter rule of a given type
3745  *
3746  * @hw: pointer to the hardware structure
3747  * @recp_list: recipe list for which the rule needs to removed
3748  * @f_entry: rule entry containing filter information
3749  */
3750 static enum ice_status
3751 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3752                          struct ice_fltr_list_entry *f_entry)
3753 {
3754         struct ice_fltr_mgmt_list_entry *list_elem;
3755         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3756         enum ice_status status = ICE_SUCCESS;
3757         bool remove_rule = false;
3758         u16 vsi_handle;
3759
3760         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3761                 return ICE_ERR_PARAM;
3762         f_entry->fltr_info.fwd_id.hw_vsi_id =
3763                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3764
3765         rule_lock = &recp_list->filt_rule_lock;
3766         ice_acquire_lock(rule_lock);
3767         list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3768                                         &f_entry->fltr_info);
3769         if (!list_elem) {
3770                 status = ICE_ERR_DOES_NOT_EXIST;
3771                 goto exit;
3772         }
3773
3774         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3775                 remove_rule = true;
3776         } else if (!list_elem->vsi_list_info) {
3777                 status = ICE_ERR_DOES_NOT_EXIST;
3778                 goto exit;
3779         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3780                 /* a ref_cnt > 1 indicates that the vsi_list is being
3781                  * shared by multiple rules. Decrement the ref_cnt and
3782                  * remove this rule, but do not modify the list, as it
3783                  * is in-use by other rules.
3784                  */
3785                 list_elem->vsi_list_info->ref_cnt--;
3786                 remove_rule = true;
3787         } else {
3788                 /* a ref_cnt of 1 indicates the vsi_list is only used
3789                  * by one rule. However, the original removal request is only
3790                  * for a single VSI. Update the vsi_list first, and only
3791                  * remove the rule if there are no further VSIs in this list.
3792                  */
3793                 vsi_handle = f_entry->fltr_info.vsi_handle;
3794                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3795                 if (status)
3796                         goto exit;
3797                 /* if VSI count goes to zero after updating the VSI list */
3798                 if (list_elem->vsi_count == 0)
3799                         remove_rule = true;
3800         }
3801
3802         if (remove_rule) {
3803                 /* Remove the lookup rule */
3804                 struct ice_aqc_sw_rules_elem *s_rule;
3805
3806                 s_rule = (struct ice_aqc_sw_rules_elem *)
3807                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
3808                 if (!s_rule) {
3809                         status = ICE_ERR_NO_MEMORY;
3810                         goto exit;
3811                 }
3812
3813                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3814                                  ice_aqc_opc_remove_sw_rules);
3815
3816                 status = ice_aq_sw_rules(hw, s_rule,
3817                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
3818                                          ice_aqc_opc_remove_sw_rules, NULL);
3819
3820                 /* Remove a book keeping from the list */
3821                 ice_free(hw, s_rule);
3822
3823                 if (status)
3824                         goto exit;
3825
3826                 LIST_DEL(&list_elem->list_entry);
3827                 ice_free(hw, list_elem);
3828         }
3829 exit:
3830         ice_release_lock(rule_lock);
3831         return status;
3832 }
3833
3834 /**
3835  * ice_aq_get_res_alloc - get allocated resources
3836  * @hw: pointer to the HW struct
3837  * @num_entries: pointer to u16 to store the number of resource entries returned
3838  * @buf: pointer to buffer
3839  * @buf_size: size of buf
3840  * @cd: pointer to command details structure or NULL
3841  *
3842  * The caller-supplied buffer must be large enough to store the resource
3843  * information for all resource types. Each resource type is an
3844  * ice_aqc_get_res_resp_elem structure.
3845  */
3846 enum ice_status
3847 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3848                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3849                      struct ice_sq_cd *cd)
3850 {
3851         struct ice_aqc_get_res_alloc *resp;
3852         enum ice_status status;
3853         struct ice_aq_desc desc;
3854
3855         if (!buf)
3856                 return ICE_ERR_BAD_PTR;
3857
3858         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3859                 return ICE_ERR_INVAL_SIZE;
3860
3861         resp = &desc.params.get_res;
3862
3863         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3864         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3865
3866         if (!status && num_entries)
3867                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3868
3869         return status;
3870 }
3871
3872 /**
3873  * ice_aq_get_res_descs - get allocated resource descriptors
3874  * @hw: pointer to the hardware structure
3875  * @num_entries: number of resource entries in buffer
3876  * @buf: structure to hold response data buffer
3877  * @buf_size: size of buffer
3878  * @res_type: resource type
3879  * @res_shared: is resource shared
3880  * @desc_id: input - first desc ID to start; output - next desc ID
3881  * @cd: pointer to command details structure or NULL
3882  */
3883 enum ice_status
3884 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3885                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3886                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3887 {
3888         struct ice_aqc_get_allocd_res_desc *cmd;
3889         struct ice_aq_desc desc;
3890         enum ice_status status;
3891
3892         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3893
3894         cmd = &desc.params.get_res_desc;
3895
3896         if (!buf)
3897                 return ICE_ERR_PARAM;
3898
3899         if (buf_size != (num_entries * sizeof(*buf)))
3900                 return ICE_ERR_PARAM;
3901
3902         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3903
3904         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3905                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
3906                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3907         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3908
3909         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3910         if (!status)
3911                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3912
3913         return status;
3914 }
3915
3916 /**
3917  * ice_add_mac_rule - Add a MAC address based filter rule
3918  * @hw: pointer to the hardware structure
3919  * @m_list: list of MAC addresses and forwarding information
3920  * @sw: pointer to switch info struct for which function add rule
3921  * @lport: logic port number on which function add rule
3922  *
3923  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
3924  * multiple unicast addresses, the function assumes that all the
3925  * addresses are unique in a given add_mac call. It doesn't
3926  * check for duplicates in this case, removing duplicates from a given
3927  * list should be taken care of in the caller of this function.
3928  */
3929 static enum ice_status
3930 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
3931                  struct ice_switch_info *sw, u8 lport)
3932 {
3933         struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
3934         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
3935         struct ice_fltr_list_entry *m_list_itr;
3936         struct LIST_HEAD_TYPE *rule_head;
3937         u16 total_elem_left, s_rule_size;
3938         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3939         enum ice_status status = ICE_SUCCESS;
3940         u16 num_unicast = 0;
3941         u8 elem_sent;
3942
3943         s_rule = NULL;
3944         rule_lock = &recp_list->filt_rule_lock;
3945         rule_head = &recp_list->filt_rules;
3946
3947         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
3948                             list_entry) {
3949                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
3950                 u16 vsi_handle;
3951                 u16 hw_vsi_id;
3952
3953                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3954                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3955                 if (!ice_is_vsi_valid(hw, vsi_handle))
3956                         return ICE_ERR_PARAM;
3957                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3958                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3959                 /* update the src in case it is VSI num */
3960                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3961                         return ICE_ERR_PARAM;
3962                 m_list_itr->fltr_info.src = hw_vsi_id;
3963                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3964                     IS_ZERO_ETHER_ADDR(add))
3965                         return ICE_ERR_PARAM;
3966                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3967                         /* Don't overwrite the unicast address */
3968                         ice_acquire_lock(rule_lock);
3969                         if (ice_find_rule_entry(rule_head,
3970                                                 &m_list_itr->fltr_info)) {
3971                                 ice_release_lock(rule_lock);
3972                                 return ICE_ERR_ALREADY_EXISTS;
3973                         }
3974                         ice_release_lock(rule_lock);
3975                         num_unicast++;
3976                 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
3977                            (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
3978                         m_list_itr->status =
3979                                 ice_add_rule_internal(hw, recp_list, lport,
3980                                                       m_list_itr);
3981                         if (m_list_itr->status)
3982                                 return m_list_itr->status;
3983                 }
3984         }
3985
3986         ice_acquire_lock(rule_lock);
3987         /* Exit if no suitable entries were found for adding bulk switch rule */
3988         if (!num_unicast) {
3989                 status = ICE_SUCCESS;
3990                 goto ice_add_mac_exit;
3991         }
3992
3993         /* Allocate switch rule buffer for the bulk update for unicast */
3994         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3995         s_rule = (struct ice_aqc_sw_rules_elem *)
3996                 ice_calloc(hw, num_unicast, s_rule_size);
3997         if (!s_rule) {
3998                 status = ICE_ERR_NO_MEMORY;
3999                 goto ice_add_mac_exit;
4000         }
4001
4002         r_iter = s_rule;
4003         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4004                             list_entry) {
4005                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4006                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4007
4008                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4009                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4010                                          ice_aqc_opc_add_sw_rules);
4011                         r_iter = (struct ice_aqc_sw_rules_elem *)
4012                                 ((u8 *)r_iter + s_rule_size);
4013                 }
4014         }
4015
4016         /* Call AQ bulk switch rule update for all unicast addresses */
4017         r_iter = s_rule;
4018         /* Call AQ switch rule in AQ_MAX chunk */
4019         for (total_elem_left = num_unicast; total_elem_left > 0;
4020              total_elem_left -= elem_sent) {
4021                 struct ice_aqc_sw_rules_elem *entry = r_iter;
4022
4023                 elem_sent = MIN_T(u8, total_elem_left,
4024                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4025                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4026                                          elem_sent, ice_aqc_opc_add_sw_rules,
4027                                          NULL);
4028                 if (status)
4029                         goto ice_add_mac_exit;
4030                 r_iter = (struct ice_aqc_sw_rules_elem *)
4031                         ((u8 *)r_iter + (elem_sent * s_rule_size));
4032         }
4033
4034         /* Fill up rule ID based on the value returned from FW */
4035         r_iter = s_rule;
4036         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4037                             list_entry) {
4038                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4039                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4040                 struct ice_fltr_mgmt_list_entry *fm_entry;
4041
4042                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4043                         f_info->fltr_rule_id =
4044                                 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4045                         f_info->fltr_act = ICE_FWD_TO_VSI;
4046                         /* Create an entry to track this MAC address */
4047                         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4048                                 ice_malloc(hw, sizeof(*fm_entry));
4049                         if (!fm_entry) {
4050                                 status = ICE_ERR_NO_MEMORY;
4051                                 goto ice_add_mac_exit;
4052                         }
4053                         fm_entry->fltr_info = *f_info;
4054                         fm_entry->vsi_count = 1;
4055                         /* The book keeping entries will get removed when
4056                          * base driver calls remove filter AQ command
4057                          */
4058
4059                         LIST_ADD(&fm_entry->list_entry, rule_head);
4060                         r_iter = (struct ice_aqc_sw_rules_elem *)
4061                                 ((u8 *)r_iter + s_rule_size);
4062                 }
4063         }
4064
4065 ice_add_mac_exit:
4066         ice_release_lock(rule_lock);
4067         if (s_rule)
4068                 ice_free(hw, s_rule);
4069         return status;
4070 }
4071
4072 /**
4073  * ice_add_mac - Add a MAC address based filter rule
4074  * @hw: pointer to the hardware structure
4075  * @m_list: list of MAC addresses and forwarding information
4076  *
4077  * Function add MAC rule for logical port from HW struct
4078  */
4079 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4080 {
4081         if (!m_list || !hw)
4082                 return ICE_ERR_PARAM;
4083
4084         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4085                                 hw->port_info->lport);
4086 }
4087
4088 /**
4089  * ice_add_vlan_internal - Add one VLAN based filter rule
4090  * @hw: pointer to the hardware structure
4091  * @recp_list: recipe list for which rule has to be added
4092  * @f_entry: filter entry containing one VLAN information
4093  */
4094 static enum ice_status
4095 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4096                       struct ice_fltr_list_entry *f_entry)
4097 {
4098         struct ice_fltr_mgmt_list_entry *v_list_itr;
4099         struct ice_fltr_info *new_fltr, *cur_fltr;
4100         enum ice_sw_lkup_type lkup_type;
4101         u16 vsi_list_id = 0, vsi_handle;
4102         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4103         enum ice_status status = ICE_SUCCESS;
4104
4105         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4106                 return ICE_ERR_PARAM;
4107
4108         f_entry->fltr_info.fwd_id.hw_vsi_id =
4109                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4110         new_fltr = &f_entry->fltr_info;
4111
4112         /* VLAN ID should only be 12 bits */
4113         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4114                 return ICE_ERR_PARAM;
4115
4116         if (new_fltr->src_id != ICE_SRC_ID_VSI)
4117                 return ICE_ERR_PARAM;
4118
4119         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4120         lkup_type = new_fltr->lkup_type;
4121         vsi_handle = new_fltr->vsi_handle;
4122         rule_lock = &recp_list->filt_rule_lock;
4123         ice_acquire_lock(rule_lock);
4124         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4125         if (!v_list_itr) {
4126                 struct ice_vsi_list_map_info *map_info = NULL;
4127
4128                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4129                         /* All VLAN pruning rules use a VSI list. Check if
4130                          * there is already a VSI list containing VSI that we
4131                          * want to add. If found, use the same vsi_list_id for
4132                          * this new VLAN rule or else create a new list.
4133                          */
4134                         map_info = ice_find_vsi_list_entry(recp_list,
4135                                                            vsi_handle,
4136                                                            &vsi_list_id);
4137                         if (!map_info) {
4138                                 status = ice_create_vsi_list_rule(hw,
4139                                                                   &vsi_handle,
4140                                                                   1,
4141                                                                   &vsi_list_id,
4142                                                                   lkup_type);
4143                                 if (status)
4144                                         goto exit;
4145                         }
4146                         /* Convert the action to forwarding to a VSI list. */
4147                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4148                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4149                 }
4150
4151                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4152                 if (!status) {
4153                         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4154                                                          new_fltr);
4155                         if (!v_list_itr) {
4156                                 status = ICE_ERR_DOES_NOT_EXIST;
4157                                 goto exit;
4158                         }
4159                         /* reuse VSI list for new rule and increment ref_cnt */
4160                         if (map_info) {
4161                                 v_list_itr->vsi_list_info = map_info;
4162                                 map_info->ref_cnt++;
4163                         } else {
4164                                 v_list_itr->vsi_list_info =
4165                                         ice_create_vsi_list_map(hw, &vsi_handle,
4166                                                                 1, vsi_list_id);
4167                         }
4168                 }
4169         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4170                 /* Update existing VSI list to add new VSI ID only if it used
4171                  * by one VLAN rule.
4172                  */
4173                 cur_fltr = &v_list_itr->fltr_info;
4174                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4175                                                  new_fltr);
4176         } else {
4177                 /* If VLAN rule exists and VSI list being used by this rule is
4178                  * referenced by more than 1 VLAN rule. Then create a new VSI
4179                  * list appending previous VSI with new VSI and update existing
4180                  * VLAN rule to point to new VSI list ID
4181                  */
4182                 struct ice_fltr_info tmp_fltr;
4183                 u16 vsi_handle_arr[2];
4184                 u16 cur_handle;
4185
4186                 /* Current implementation only supports reusing VSI list with
4187                  * one VSI count. We should never hit below condition
4188                  */
4189                 if (v_list_itr->vsi_count > 1 &&
4190                     v_list_itr->vsi_list_info->ref_cnt > 1) {
4191                         ice_debug(hw, ICE_DBG_SW,
4192                                   "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4193                         status = ICE_ERR_CFG;
4194                         goto exit;
4195                 }
4196
4197                 cur_handle =
4198                         ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4199                                            ICE_MAX_VSI);
4200
4201                 /* A rule already exists with the new VSI being added */
4202                 if (cur_handle == vsi_handle) {
4203                         status = ICE_ERR_ALREADY_EXISTS;
4204                         goto exit;
4205                 }
4206
4207                 vsi_handle_arr[0] = cur_handle;
4208                 vsi_handle_arr[1] = vsi_handle;
4209                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4210                                                   &vsi_list_id, lkup_type);
4211                 if (status)
4212                         goto exit;
4213
4214                 tmp_fltr = v_list_itr->fltr_info;
4215                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4216                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4217                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4218                 /* Update the previous switch rule to a new VSI list which
4219                  * includes current VSI that is requested
4220                  */
4221                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4222                 if (status)
4223                         goto exit;
4224
4225                 /* before overriding VSI list map info. decrement ref_cnt of
4226                  * previous VSI list
4227                  */
4228                 v_list_itr->vsi_list_info->ref_cnt--;
4229
4230                 /* now update to newly created list */
4231                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4232                 v_list_itr->vsi_list_info =
4233                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4234                                                 vsi_list_id);
4235                 v_list_itr->vsi_count++;
4236         }
4237
4238 exit:
4239         ice_release_lock(rule_lock);
4240         return status;
4241 }
4242
4243 /**
4244  * ice_add_vlan_rule - Add VLAN based filter rule
4245  * @hw: pointer to the hardware structure
4246  * @v_list: list of VLAN entries and forwarding information
4247  * @sw: pointer to switch info struct for which function add rule
4248  */
4249 static enum ice_status
4250 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4251                   struct ice_switch_info *sw)
4252 {
4253         struct ice_fltr_list_entry *v_list_itr;
4254         struct ice_sw_recipe *recp_list;
4255
4256         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4257         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4258                             list_entry) {
4259                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4260                         return ICE_ERR_PARAM;
4261                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4262                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4263                                                            v_list_itr);
4264                 if (v_list_itr->status)
4265                         return v_list_itr->status;
4266         }
4267         return ICE_SUCCESS;
4268 }
4269
4270 /**
4271  * ice_add_vlan - Add a VLAN based filter rule
4272  * @hw: pointer to the hardware structure
4273  * @v_list: list of VLAN and forwarding information
4274  *
4275  * Function add VLAN rule for logical port from HW struct
4276  */
4277 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4278 {
4279         if (!v_list || !hw)
4280                 return ICE_ERR_PARAM;
4281
4282         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4283 }
4284
4285 /**
4286  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4287  * @hw: pointer to the hardware structure
4288  * @mv_list: list of MAC and VLAN filters
4289  * @sw: pointer to switch info struct for which function add rule
4290  * @lport: logic port number on which function add rule
4291  *
4292  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4293  * pruning bits enabled, then it is the responsibility of the caller to make
4294  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4295  * VLAN won't be received on that VSI otherwise.
4296  */
4297 static enum ice_status
4298 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4299                       struct ice_switch_info *sw, u8 lport)
4300 {
4301         struct ice_fltr_list_entry *mv_list_itr;
4302         struct ice_sw_recipe *recp_list;
4303
4304         if (!mv_list || !hw)
4305                 return ICE_ERR_PARAM;
4306
4307         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4308         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4309                             list_entry) {
4310                 enum ice_sw_lkup_type l_type =
4311                         mv_list_itr->fltr_info.lkup_type;
4312
4313                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4314                         return ICE_ERR_PARAM;
4315                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4316                 mv_list_itr->status =
4317                         ice_add_rule_internal(hw, recp_list, lport,
4318                                               mv_list_itr);
4319                 if (mv_list_itr->status)
4320                         return mv_list_itr->status;
4321         }
4322         return ICE_SUCCESS;
4323 }
4324
4325 /**
4326  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4327  * @hw: pointer to the hardware structure
4328  * @mv_list: list of MAC VLAN addresses and forwarding information
4329  *
4330  * Function add MAC VLAN rule for logical port from HW struct
4331  */
4332 enum ice_status
4333 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4334 {
4335         if (!mv_list || !hw)
4336                 return ICE_ERR_PARAM;
4337
4338         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4339                                      hw->port_info->lport);
4340 }
4341
4342 /**
4343  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4344  * @hw: pointer to the hardware structure
4345  * @em_list: list of ether type MAC filter, MAC is optional
4346  * @sw: pointer to switch info struct for which function add rule
4347  * @lport: logic port number on which function add rule
4348  *
4349  * This function requires the caller to populate the entries in
4350  * the filter list with the necessary fields (including flags to
4351  * indicate Tx or Rx rules).
4352  */
4353 static enum ice_status
4354 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4355                      struct ice_switch_info *sw, u8 lport)
4356 {
4357         struct ice_fltr_list_entry *em_list_itr;
4358
4359         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4360                             list_entry) {
4361                 struct ice_sw_recipe *recp_list;
4362                 enum ice_sw_lkup_type l_type;
4363
4364                 l_type = em_list_itr->fltr_info.lkup_type;
4365                 recp_list = &sw->recp_list[l_type];
4366
4367                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4368                     l_type != ICE_SW_LKUP_ETHERTYPE)
4369                         return ICE_ERR_PARAM;
4370
4371                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4372                                                             lport,
4373                                                             em_list_itr);
4374                 if (em_list_itr->status)
4375                         return em_list_itr->status;
4376         }
4377         return ICE_SUCCESS;
4378 }
4379
4380 /**
4381  * ice_add_eth_mac - Add a ethertype based filter rule
4382  * @hw: pointer to the hardware structure
4383  * @em_list: list of ethertype and forwarding information
4384  *
4385  * Function add ethertype rule for logical port from HW struct
4386  */
4387 enum ice_status
4388 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4389 {
4390         if (!em_list || !hw)
4391                 return ICE_ERR_PARAM;
4392
4393         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4394                                     hw->port_info->lport);
4395 }
4396
4397 /**
4398  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4399  * @hw: pointer to the hardware structure
4400  * @em_list: list of ethertype or ethertype MAC entries
4401  * @sw: pointer to switch info struct for which function add rule
4402  */
4403 static enum ice_status
4404 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4405                         struct ice_switch_info *sw)
4406 {
4407         struct ice_fltr_list_entry *em_list_itr, *tmp;
4408
4409         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4410                                  list_entry) {
4411                 struct ice_sw_recipe *recp_list;
4412                 enum ice_sw_lkup_type l_type;
4413
4414                 l_type = em_list_itr->fltr_info.lkup_type;
4415
4416                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4417                     l_type != ICE_SW_LKUP_ETHERTYPE)
4418                         return ICE_ERR_PARAM;
4419
4420                 recp_list = &sw->recp_list[l_type];
4421                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4422                                                                em_list_itr);
4423                 if (em_list_itr->status)
4424                         return em_list_itr->status;
4425         }
4426         return ICE_SUCCESS;
4427 }
4428
4429 /**
4430  * ice_remove_eth_mac - remove a ethertype based filter rule
4431  * @hw: pointer to the hardware structure
4432  * @em_list: list of ethertype and forwarding information
4433  *
4434  */
4435 enum ice_status
4436 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4437 {
4438         if (!em_list || !hw)
4439                 return ICE_ERR_PARAM;
4440
4441         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4442 }
4443
4444 /**
4445  * ice_rem_sw_rule_info
4446  * @hw: pointer to the hardware structure
4447  * @rule_head: pointer to the switch list structure that we want to delete
4448  */
4449 static void
4450 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4451 {
4452         if (!LIST_EMPTY(rule_head)) {
4453                 struct ice_fltr_mgmt_list_entry *entry;
4454                 struct ice_fltr_mgmt_list_entry *tmp;
4455
4456                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4457                                          ice_fltr_mgmt_list_entry, list_entry) {
4458                         LIST_DEL(&entry->list_entry);
4459                         ice_free(hw, entry);
4460                 }
4461         }
4462 }
4463
4464 /**
4465  * ice_rem_adv_rule_info
4466  * @hw: pointer to the hardware structure
4467  * @rule_head: pointer to the switch list structure that we want to delete
4468  */
4469 static void
4470 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4471 {
4472         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4473         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4474
4475         if (LIST_EMPTY(rule_head))
4476                 return;
4477
4478         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4479                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
4480                 LIST_DEL(&lst_itr->list_entry);
4481                 ice_free(hw, lst_itr->lkups);
4482                 ice_free(hw, lst_itr);
4483         }
4484 }
4485
4486 /**
4487  * ice_rem_all_sw_rules_info
4488  * @hw: pointer to the hardware structure
4489  */
4490 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4491 {
4492         struct ice_switch_info *sw = hw->switch_info;
4493         u8 i;
4494
4495         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4496                 struct LIST_HEAD_TYPE *rule_head;
4497
4498                 rule_head = &sw->recp_list[i].filt_rules;
4499                 if (!sw->recp_list[i].adv_rule)
4500                         ice_rem_sw_rule_info(hw, rule_head);
4501                 else
4502                         ice_rem_adv_rule_info(hw, rule_head);
4503                 if (sw->recp_list[i].adv_rule &&
4504                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
4505                         sw->recp_list[i].adv_rule = false;
4506         }
4507 }
4508
4509 /**
4510  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4511  * @pi: pointer to the port_info structure
4512  * @vsi_handle: VSI handle to set as default
4513  * @set: true to add the above mentioned switch rule, false to remove it
4514  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4515  *
4516  * add filter rule to set/unset given VSI as default VSI for the switch
4517  * (represented by swid)
4518  */
4519 enum ice_status
4520 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4521                  u8 direction)
4522 {
4523         struct ice_aqc_sw_rules_elem *s_rule;
4524         struct ice_fltr_info f_info;
4525         struct ice_hw *hw = pi->hw;
4526         enum ice_adminq_opc opcode;
4527         enum ice_status status;
4528         u16 s_rule_size;
4529         u16 hw_vsi_id;
4530
4531         if (!ice_is_vsi_valid(hw, vsi_handle))
4532                 return ICE_ERR_PARAM;
4533         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4534
4535         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4536                             ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4537         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4538         if (!s_rule)
4539                 return ICE_ERR_NO_MEMORY;
4540
4541         ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4542
4543         f_info.lkup_type = ICE_SW_LKUP_DFLT;
4544         f_info.flag = direction;
4545         f_info.fltr_act = ICE_FWD_TO_VSI;
4546         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4547
4548         if (f_info.flag & ICE_FLTR_RX) {
4549                 f_info.src = pi->lport;
4550                 f_info.src_id = ICE_SRC_ID_LPORT;
4551                 if (!set)
4552                         f_info.fltr_rule_id =
4553                                 pi->dflt_rx_vsi_rule_id;
4554         } else if (f_info.flag & ICE_FLTR_TX) {
4555                 f_info.src_id = ICE_SRC_ID_VSI;
4556                 f_info.src = hw_vsi_id;
4557                 if (!set)
4558                         f_info.fltr_rule_id =
4559                                 pi->dflt_tx_vsi_rule_id;
4560         }
4561
4562         if (set)
4563                 opcode = ice_aqc_opc_add_sw_rules;
4564         else
4565                 opcode = ice_aqc_opc_remove_sw_rules;
4566
4567         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4568
4569         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4570         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4571                 goto out;
4572         if (set) {
4573                 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4574
4575                 if (f_info.flag & ICE_FLTR_TX) {
4576                         pi->dflt_tx_vsi_num = hw_vsi_id;
4577                         pi->dflt_tx_vsi_rule_id = index;
4578                 } else if (f_info.flag & ICE_FLTR_RX) {
4579                         pi->dflt_rx_vsi_num = hw_vsi_id;
4580                         pi->dflt_rx_vsi_rule_id = index;
4581                 }
4582         } else {
4583                 if (f_info.flag & ICE_FLTR_TX) {
4584                         pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4585                         pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4586                 } else if (f_info.flag & ICE_FLTR_RX) {
4587                         pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4588                         pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4589                 }
4590         }
4591
4592 out:
4593         ice_free(hw, s_rule);
4594         return status;
4595 }
4596
4597 /**
4598  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4599  * @list_head: head of rule list
4600  * @f_info: rule information
4601  *
4602  * Helper function to search for a unicast rule entry - this is to be used
4603  * to remove unicast MAC filter that is not shared with other VSIs on the
4604  * PF switch.
4605  *
4606  * Returns pointer to entry storing the rule if found
4607  */
4608 static struct ice_fltr_mgmt_list_entry *
4609 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4610                           struct ice_fltr_info *f_info)
4611 {
4612         struct ice_fltr_mgmt_list_entry *list_itr;
4613
4614         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4615                             list_entry) {
4616                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4617                             sizeof(f_info->l_data)) &&
4618                     f_info->fwd_id.hw_vsi_id ==
4619                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
4620                     f_info->flag == list_itr->fltr_info.flag)
4621                         return list_itr;
4622         }
4623         return NULL;
4624 }
4625
4626 /**
4627  * ice_remove_mac_rule - remove a MAC based filter rule
4628  * @hw: pointer to the hardware structure
4629  * @m_list: list of MAC addresses and forwarding information
4630  * @recp_list: list from which function remove MAC address
4631  *
4632  * This function removes either a MAC filter rule or a specific VSI from a
4633  * VSI list for a multicast MAC address.
4634  *
4635  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4636  * ice_add_mac. Caller should be aware that this call will only work if all
4637  * the entries passed into m_list were added previously. It will not attempt to
4638  * do a partial remove of entries that were found.
4639  */
4640 static enum ice_status
4641 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4642                     struct ice_sw_recipe *recp_list)
4643 {
4644         struct ice_fltr_list_entry *list_itr, *tmp;
4645         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4646
4647         if (!m_list)
4648                 return ICE_ERR_PARAM;
4649
4650         rule_lock = &recp_list->filt_rule_lock;
4651         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4652                                  list_entry) {
4653                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4654                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4655                 u16 vsi_handle;
4656
4657                 if (l_type != ICE_SW_LKUP_MAC)
4658                         return ICE_ERR_PARAM;
4659
4660                 vsi_handle = list_itr->fltr_info.vsi_handle;
4661                 if (!ice_is_vsi_valid(hw, vsi_handle))
4662                         return ICE_ERR_PARAM;
4663
4664                 list_itr->fltr_info.fwd_id.hw_vsi_id =
4665                                         ice_get_hw_vsi_num(hw, vsi_handle);
4666                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4667                         /* Don't remove the unicast address that belongs to
4668                          * another VSI on the switch, since it is not being
4669                          * shared...
4670                          */
4671                         ice_acquire_lock(rule_lock);
4672                         if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4673                                                        &list_itr->fltr_info)) {
4674                                 ice_release_lock(rule_lock);
4675                                 return ICE_ERR_DOES_NOT_EXIST;
4676                         }
4677                         ice_release_lock(rule_lock);
4678                 }
4679                 list_itr->status = ice_remove_rule_internal(hw, recp_list,
4680                                                             list_itr);
4681                 if (list_itr->status)
4682                         return list_itr->status;
4683         }
4684         return ICE_SUCCESS;
4685 }
4686
4687 /**
4688  * ice_remove_mac - remove a MAC address based filter rule
4689  * @hw: pointer to the hardware structure
4690  * @m_list: list of MAC addresses and forwarding information
4691  *
4692  */
4693 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4694 {
4695         struct ice_sw_recipe *recp_list;
4696
4697         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4698         return ice_remove_mac_rule(hw, m_list, recp_list);
4699 }
4700
4701 /**
4702  * ice_remove_vlan_rule - Remove VLAN based filter rule
4703  * @hw: pointer to the hardware structure
4704  * @v_list: list of VLAN entries and forwarding information
4705  * @recp_list: list from which function remove VLAN
4706  */
4707 static enum ice_status
4708 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4709                      struct ice_sw_recipe *recp_list)
4710 {
4711         struct ice_fltr_list_entry *v_list_itr, *tmp;
4712
4713         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4714                                  list_entry) {
4715                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4716
4717                 if (l_type != ICE_SW_LKUP_VLAN)
4718                         return ICE_ERR_PARAM;
4719                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4720                                                               v_list_itr);
4721                 if (v_list_itr->status)
4722                         return v_list_itr->status;
4723         }
4724         return ICE_SUCCESS;
4725 }
4726
4727 /**
4728  * ice_remove_vlan - remove a VLAN address based filter rule
4729  * @hw: pointer to the hardware structure
4730  * @v_list: list of VLAN and forwarding information
4731  *
4732  */
4733 enum ice_status
4734 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4735 {
4736         struct ice_sw_recipe *recp_list;
4737
4738         if (!v_list || !hw)
4739                 return ICE_ERR_PARAM;
4740
4741         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4742         return ice_remove_vlan_rule(hw, v_list, recp_list);
4743 }
4744
4745 /**
4746  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4747  * @hw: pointer to the hardware structure
4748  * @v_list: list of MAC VLAN entries and forwarding information
4749  * @recp_list: list from which function remove MAC VLAN
4750  */
4751 static enum ice_status
4752 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4753                          struct ice_sw_recipe *recp_list)
4754 {
4755         struct ice_fltr_list_entry *v_list_itr, *tmp;
4756
4757         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4758         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4759                                  list_entry) {
4760                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4761
4762                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4763                         return ICE_ERR_PARAM;
4764                 v_list_itr->status =
4765                         ice_remove_rule_internal(hw, recp_list,
4766                                                  v_list_itr);
4767                 if (v_list_itr->status)
4768                         return v_list_itr->status;
4769         }
4770         return ICE_SUCCESS;
4771 }
4772
4773 /**
4774  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4775  * @hw: pointer to the hardware structure
4776  * @mv_list: list of MAC VLAN and forwarding information
4777  */
4778 enum ice_status
4779 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4780 {
4781         struct ice_sw_recipe *recp_list;
4782
4783         if (!mv_list || !hw)
4784                 return ICE_ERR_PARAM;
4785
4786         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4787         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4788 }
4789
4790 /**
4791  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4792  * @fm_entry: filter entry to inspect
4793  * @vsi_handle: VSI handle to compare with filter info
4794  */
4795 static bool
4796 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4797 {
4798         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4799                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4800                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4801                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4802                                  vsi_handle))));
4803 }
4804
4805 /**
4806  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4807  * @hw: pointer to the hardware structure
4808  * @vsi_handle: VSI handle to remove filters from
4809  * @vsi_list_head: pointer to the list to add entry to
4810  * @fi: pointer to fltr_info of filter entry to copy & add
4811  *
4812  * Helper function, used when creating a list of filters to remove from
4813  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4814  * original filter entry, with the exception of fltr_info.fltr_act and
4815  * fltr_info.fwd_id fields. These are set such that later logic can
4816  * extract which VSI to remove the fltr from, and pass on that information.
4817  */
4818 static enum ice_status
4819 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4820                                struct LIST_HEAD_TYPE *vsi_list_head,
4821                                struct ice_fltr_info *fi)
4822 {
4823         struct ice_fltr_list_entry *tmp;
4824
4825         /* this memory is freed up in the caller function
4826          * once filters for this VSI are removed
4827          */
4828         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4829         if (!tmp)
4830                 return ICE_ERR_NO_MEMORY;
4831
4832         tmp->fltr_info = *fi;
4833
4834         /* Overwrite these fields to indicate which VSI to remove filter from,
4835          * so find and remove logic can extract the information from the
4836          * list entries. Note that original entries will still have proper
4837          * values.
4838          */
4839         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4840         tmp->fltr_info.vsi_handle = vsi_handle;
4841         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4842
4843         LIST_ADD(&tmp->list_entry, vsi_list_head);
4844
4845         return ICE_SUCCESS;
4846 }
4847
4848 /**
4849  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4850  * @hw: pointer to the hardware structure
4851  * @vsi_handle: VSI handle to remove filters from
4852  * @lkup_list_head: pointer to the list that has certain lookup type filters
4853  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4854  *
4855  * Locates all filters in lkup_list_head that are used by the given VSI,
4856  * and adds COPIES of those entries to vsi_list_head (intended to be used
4857  * to remove the listed filters).
4858  * Note that this means all entries in vsi_list_head must be explicitly
4859  * deallocated by the caller when done with list.
4860  */
4861 static enum ice_status
4862 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4863                          struct LIST_HEAD_TYPE *lkup_list_head,
4864                          struct LIST_HEAD_TYPE *vsi_list_head)
4865 {
4866         struct ice_fltr_mgmt_list_entry *fm_entry;
4867         enum ice_status status = ICE_SUCCESS;
4868
4869         /* check to make sure VSI ID is valid and within boundary */
4870         if (!ice_is_vsi_valid(hw, vsi_handle))
4871                 return ICE_ERR_PARAM;
4872
4873         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4874                             ice_fltr_mgmt_list_entry, list_entry) {
4875                 struct ice_fltr_info *fi;
4876
4877                 fi = &fm_entry->fltr_info;
4878                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4879                         continue;
4880
4881                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4882                                                         vsi_list_head, fi);
4883                 if (status)
4884                         return status;
4885         }
4886         return status;
4887 }
4888
4889 /**
4890  * ice_determine_promisc_mask
4891  * @fi: filter info to parse
4892  *
4893  * Helper function to determine which ICE_PROMISC_ mask corresponds
4894  * to given filter into.
4895  */
4896 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4897 {
4898         u16 vid = fi->l_data.mac_vlan.vlan_id;
4899         u8 *macaddr = fi->l_data.mac.mac_addr;
4900         bool is_tx_fltr = false;
4901         u8 promisc_mask = 0;
4902
4903         if (fi->flag == ICE_FLTR_TX)
4904                 is_tx_fltr = true;
4905
4906         if (IS_BROADCAST_ETHER_ADDR(macaddr))
4907                 promisc_mask |= is_tx_fltr ?
4908                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4909         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4910                 promisc_mask |= is_tx_fltr ?
4911                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4912         else if (IS_UNICAST_ETHER_ADDR(macaddr))
4913                 promisc_mask |= is_tx_fltr ?
4914                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4915         if (vid)
4916                 promisc_mask |= is_tx_fltr ?
4917                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4918
4919         return promisc_mask;
4920 }
4921
4922 /**
4923  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
4924  * @hw: pointer to the hardware structure
4925  * @vsi_handle: VSI handle to retrieve info from
4926  * @promisc_mask: pointer to mask to be filled in
4927  * @vid: VLAN ID of promisc VLAN VSI
4928  * @sw: pointer to switch info struct for which function add rule
4929  */
4930 static enum ice_status
4931 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4932                      u16 *vid, struct ice_switch_info *sw)
4933 {
4934         struct ice_fltr_mgmt_list_entry *itr;
4935         struct LIST_HEAD_TYPE *rule_head;
4936         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
4937
4938         if (!ice_is_vsi_valid(hw, vsi_handle))
4939                 return ICE_ERR_PARAM;
4940
4941         *vid = 0;
4942         *promisc_mask = 0;
4943         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
4944         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
4945
4946         ice_acquire_lock(rule_lock);
4947         LIST_FOR_EACH_ENTRY(itr, rule_head,
4948                             ice_fltr_mgmt_list_entry, list_entry) {
4949                 /* Continue if this filter doesn't apply to this VSI or the
4950                  * VSI ID is not in the VSI map for this filter
4951                  */
4952                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
4953                         continue;
4954
4955                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
4956         }
4957         ice_release_lock(rule_lock);
4958
4959         return ICE_SUCCESS;
4960 }
4961
4962 /**
4963  * ice_get_vsi_promisc - get promiscuous mode of given VSI
4964  * @hw: pointer to the hardware structure
4965  * @vsi_handle: VSI handle to retrieve info from
4966  * @promisc_mask: pointer to mask to be filled in
4967  * @vid: VLAN ID of promisc VLAN VSI
4968  */
4969 enum ice_status
4970 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4971                     u16 *vid)
4972 {
4973         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4974                                     vid, hw->switch_info);
4975 }
4976
4977 /**
4978  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
4979  * @hw: pointer to the hardware structure
4980  * @vsi_handle: VSI handle to retrieve info from
4981  * @promisc_mask: pointer to mask to be filled in
4982  * @vid: VLAN ID of promisc VLAN VSI
4983  * @sw: pointer to switch info struct for which function add rule
4984  */
4985 static enum ice_status
4986 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4987                           u16 *vid, struct ice_switch_info *sw)
4988 {
4989         struct ice_fltr_mgmt_list_entry *itr;
4990         struct LIST_HEAD_TYPE *rule_head;
4991         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
4992
4993         if (!ice_is_vsi_valid(hw, vsi_handle))
4994                 return ICE_ERR_PARAM;
4995
4996         *vid = 0;
4997         *promisc_mask = 0;
4998         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
4999         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5000
5001         ice_acquire_lock(rule_lock);
5002         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5003                             list_entry) {
5004                 /* Continue if this filter doesn't apply to this VSI or the
5005                  * VSI ID is not in the VSI map for this filter
5006                  */
5007                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5008                         continue;
5009
5010                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5011         }
5012         ice_release_lock(rule_lock);
5013
5014         return ICE_SUCCESS;
5015 }
5016
5017 /**
5018  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5019  * @hw: pointer to the hardware structure
5020  * @vsi_handle: VSI handle to retrieve info from
5021  * @promisc_mask: pointer to mask to be filled in
5022  * @vid: VLAN ID of promisc VLAN VSI
5023  */
5024 enum ice_status
5025 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5026                          u16 *vid)
5027 {
5028         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5029                                          vid, hw->switch_info);
5030 }
5031
5032 /**
5033  * ice_remove_promisc - Remove promisc based filter rules
5034  * @hw: pointer to the hardware structure
5035  * @recp_id: recipe ID for which the rule needs to removed
5036  * @v_list: list of promisc entries
5037  */
5038 static enum ice_status
5039 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5040                    struct LIST_HEAD_TYPE *v_list)
5041 {
5042         struct ice_fltr_list_entry *v_list_itr, *tmp;
5043         struct ice_sw_recipe *recp_list;
5044
5045         recp_list = &hw->switch_info->recp_list[recp_id];
5046         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5047                                  list_entry) {
5048                 v_list_itr->status =
5049                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5050                 if (v_list_itr->status)
5051                         return v_list_itr->status;
5052         }
5053         return ICE_SUCCESS;
5054 }
5055
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct from which the function removes rules
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                       u16 vid, struct ice_switch_info *sw)
{
        struct ice_fltr_list_entry *fm_entry, *tmp;
        struct LIST_HEAD_TYPE remove_list_head;
        struct ice_fltr_mgmt_list_entry *itr;
        struct LIST_HEAD_TYPE *rule_head;
        struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
        enum ice_status status = ICE_SUCCESS;
        u8 recipe_id;

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;

        /* VLAN promisc bits are tracked under the PROMISC_VLAN recipe;
         * everything else lives under the plain PROMISC recipe.
         */
        if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
                recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
        else
                recipe_id = ICE_SW_LKUP_PROMISC;

        rule_head = &sw->recp_list[recipe_id].filt_rules;
        rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

        INIT_LIST_HEAD(&remove_list_head);

        /* Collect COPIES of the matching rules while holding the lock;
         * actual removal happens after the lock is dropped because
         * ice_remove_promisc() takes the lock itself.
         */
        ice_acquire_lock(rule_lock);
        LIST_FOR_EACH_ENTRY(itr, rule_head,
                            ice_fltr_mgmt_list_entry, list_entry) {
                struct ice_fltr_info *fltr_info;
                u8 fltr_promisc_mask = 0;

                if (!ice_vsi_uses_fltr(itr, vsi_handle))
                        continue;
                fltr_info = &itr->fltr_info;

                /* For VLAN promisc rules, only clear the requested VLAN ID */
                if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
                    vid != fltr_info->l_data.mac_vlan.vlan_id)
                        continue;

                fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

                /* Skip if filter is not completely specified by given mask */
                if (fltr_promisc_mask & ~promisc_mask)
                        continue;

                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
                                                        &remove_list_head,
                                                        fltr_info);
                if (status) {
                        ice_release_lock(rule_lock);
                        goto free_fltr_list;
                }
        }
        ice_release_lock(rule_lock);

        status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
        /* Free the temporary rule copies regardless of the outcome */
        LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
                                 ice_fltr_list_entry, list_entry) {
                LIST_DEL(&fm_entry->list_entry);
                ice_free(hw, fm_entry);
        }

        return status;
}
5130
5131 /**
5132  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5133  * @hw: pointer to the hardware structure
5134  * @vsi_handle: VSI handle to clear mode
5135  * @promisc_mask: mask of promiscuous config bits to clear
5136  * @vid: VLAN ID to clear VLAN promiscuous
5137  */
5138 enum ice_status
5139 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5140                       u8 promisc_mask, u16 vid)
5141 {
5142         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5143                                       vid, hw->switch_info);
5144 }
5145
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which the function adds rules
 *
 * Installs one switch rule per direction/packet-type combination present in
 * @promisc_mask. Returns the status of the first rule that fails to add.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                     u16 vid, u8 lport, struct ice_switch_info *sw)
{
        enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
        struct ice_fltr_list_entry f_list_entry;
        struct ice_fltr_info new_fltr;
        enum ice_status status = ICE_SUCCESS;
        bool is_tx_fltr;
        u16 hw_vsi_id;
        int pkt_type;
        u8 recipe_id;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (!ice_is_vsi_valid(hw, vsi_handle))
                return ICE_ERR_PARAM;
        hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

        ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

        /* Any VLAN promisc bit selects the PROMISC_VLAN recipe and pins the
         * rule to the given VLAN ID; otherwise the plain PROMISC recipe is
         * used.
         */
        if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
                new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
                new_fltr.l_data.mac_vlan.vlan_id = vid;
                recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
        } else {
                new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
                recipe_id = ICE_SW_LKUP_PROMISC;
        }

        /* Separate filters must be set for each direction/packet type
         * combination, so we will loop over the mask value, store the
         * individual type, and clear it out in the input mask as it
         * is found.
         */
        while (promisc_mask) {
                struct ice_sw_recipe *recp_list;
                u8 *mac_addr;

                pkt_type = 0;
                is_tx_fltr = false;

                /* Consume exactly one UCAST/MCAST/BCAST bit per iteration,
                 * recording the packet class and direction it encodes.
                 */
                if (promisc_mask & ICE_PROMISC_UCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_UCAST_RX;
                        pkt_type = UCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_UCAST_TX;
                        pkt_type = UCAST_FLTR;
                        is_tx_fltr = true;
                } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_MCAST_RX;
                        pkt_type = MCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_MCAST_TX;
                        pkt_type = MCAST_FLTR;
                        is_tx_fltr = true;
                } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
                        promisc_mask &= ~ICE_PROMISC_BCAST_RX;
                        pkt_type = BCAST_FLTR;
                } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
                        promisc_mask &= ~ICE_PROMISC_BCAST_TX;
                        pkt_type = BCAST_FLTR;
                        is_tx_fltr = true;
                }

                /* Check for VLAN promiscuous flag */
                if (promisc_mask & ICE_PROMISC_VLAN_RX) {
                        promisc_mask &= ~ICE_PROMISC_VLAN_RX;
                } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
                        promisc_mask &= ~ICE_PROMISC_VLAN_TX;
                        is_tx_fltr = true;
                }

                /* Set filter DA based on packet type */
                mac_addr = new_fltr.l_data.mac.mac_addr;
                if (pkt_type == BCAST_FLTR) {
                        ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
                } else if (pkt_type == MCAST_FLTR ||
                           pkt_type == UCAST_FLTR) {
                        /* Use the dummy ether header DA */
                        ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
                                   ICE_NONDMA_TO_NONDMA);
                        if (pkt_type == MCAST_FLTR)
                                mac_addr[0] |= 0x1;     /* Set multicast bit */
                }

                /* Need to reset this to zero for all iterations */
                new_fltr.flag = 0;
                if (is_tx_fltr) {
                        new_fltr.flag |= ICE_FLTR_TX;
                        new_fltr.src = hw_vsi_id;
                } else {
                        new_fltr.flag |= ICE_FLTR_RX;
                        new_fltr.src = lport;
                }

                new_fltr.fltr_act = ICE_FWD_TO_VSI;
                new_fltr.vsi_handle = vsi_handle;
                new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
                f_list_entry.fltr_info = new_fltr;
                recp_list = &sw->recp_list[recipe_id];

                status = ice_add_rule_internal(hw, recp_list, lport,
                                               &f_list_entry);
                if (status != ICE_SUCCESS)
                        goto set_promisc_exit;
        }

set_promisc_exit:
        return status;
}
5266
5267 /**
5268  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5269  * @hw: pointer to the hardware structure
5270  * @vsi_handle: VSI handle to configure
5271  * @promisc_mask: mask of promiscuous config bits
5272  * @vid: VLAN ID to set VLAN promiscuous
5273  */
5274 enum ice_status
5275 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5276                     u16 vid)
5277 {
5278         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5279                                     hw->port_info->lport,
5280                                     hw->switch_info);
5281 }
5282
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which the function adds rules
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                          bool rm_vlan_promisc, u8 lport,
                          struct ice_switch_info *sw)
{
        struct ice_fltr_list_entry *list_itr, *tmp;
        struct LIST_HEAD_TYPE vsi_list_head;
        struct LIST_HEAD_TYPE *vlan_head;
        struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
        enum ice_status status;
        u16 vlan_id;

        /* Snapshot (copy) the VSI's VLAN rules under the lock, so promisc
         * can be set/cleared per VLAN without holding the lock.
         */
        INIT_LIST_HEAD(&vsi_list_head);
        vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
        vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
        ice_acquire_lock(vlan_lock);
        status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
                                          &vsi_list_head);
        ice_release_lock(vlan_lock);
        if (status)
                goto free_fltr_list;

        /* Apply (or clear) the promisc mask for every VLAN the VSI uses;
         * stop at the first failure.
         */
        LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
                            list_entry) {
                vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
                if (rm_vlan_promisc)
                        status =  _ice_clear_vsi_promisc(hw, vsi_handle,
                                                         promisc_mask,
                                                         vlan_id, sw);
                else
                        status =  _ice_set_vsi_promisc(hw, vsi_handle,
                                                       promisc_mask, vlan_id,
                                                       lport, sw);
                if (status)
                        break;
        }

free_fltr_list:
        /* Free the temporary rule copies regardless of the outcome */
        LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
                                 ice_fltr_list_entry, list_entry) {
                LIST_DEL(&list_itr->list_entry);
                ice_free(hw, list_itr);
        }
        return status;
}
5339
5340 /**
5341  * ice_set_vlan_vsi_promisc
5342  * @hw: pointer to the hardware structure
5343  * @vsi_handle: VSI handle to configure
5344  * @promisc_mask: mask of promiscuous config bits
5345  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5346  *
5347  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5348  */
5349 enum ice_status
5350 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5351                          bool rm_vlan_promisc)
5352 {
5353         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5354                                          rm_vlan_promisc, hw->port_info->lport,
5355                                          hw->switch_info);
5356 }
5357
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Collects copies of all @lkup-type rules used by @vsi_handle and removes
 * them via the removal routine matching the lookup type. Best-effort: a
 * failure to build the removal list returns silently.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                         struct ice_sw_recipe *recp_list,
                         enum ice_sw_lkup_type lkup)
{
        struct ice_fltr_list_entry *fm_entry;
        struct LIST_HEAD_TYPE remove_list_head;
        struct LIST_HEAD_TYPE *rule_head;
        struct ice_fltr_list_entry *tmp;
        struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
        enum ice_status status;

        /* Copy the VSI's rules of this lookup type under the lock; removal
         * runs after the lock is dropped (it re-acquires it internally).
         */
        INIT_LIST_HEAD(&remove_list_head);
        rule_lock = &recp_list[lkup].filt_rule_lock;
        rule_head = &recp_list[lkup].filt_rules;
        ice_acquire_lock(rule_lock);
        status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
                                          &remove_list_head);
        ice_release_lock(rule_lock);
        if (status)
                return;

        /* Dispatch to the removal routine for this lookup type. MAC/VLAN use
         * the internal *_rule variants with the indexed recipe; the others
         * go through their public wrappers.
         */
        switch (lkup) {
        case ICE_SW_LKUP_MAC:
                ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
                break;
        case ICE_SW_LKUP_VLAN:
                ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
                break;
        case ICE_SW_LKUP_PROMISC:
        case ICE_SW_LKUP_PROMISC_VLAN:
                ice_remove_promisc(hw, lkup, &remove_list_head);
                break;
        case ICE_SW_LKUP_MAC_VLAN:
                ice_remove_mac_vlan(hw, &remove_list_head);
                break;
        case ICE_SW_LKUP_ETHERTYPE:
        case ICE_SW_LKUP_ETHERTYPE_MAC:
                ice_remove_eth_mac(hw, &remove_list_head);
                break;
        case ICE_SW_LKUP_DFLT:
                ice_debug(hw, ICE_DBG_SW,
                          "Remove filters for this lookup type hasn't been implemented yet\n");
                break;
        case ICE_SW_LKUP_LAST:
                ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
                break;
        }

        /* Free the temporary rule copies built above */
        LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
                                 ice_fltr_list_entry, list_entry) {
                LIST_DEL(&fm_entry->list_entry);
                ice_free(hw, fm_entry);
        }
}
5420
5421 /**
5422  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5423  * @hw: pointer to the hardware structure
5424  * @vsi_handle: VSI handle to remove filters from
5425  * @sw: pointer to switch info struct
5426  */
5427 static void
5428 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5429                          struct ice_switch_info *sw)
5430 {
5431         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5432
5433         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5434                                  sw->recp_list, ICE_SW_LKUP_MAC);
5435         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5436                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5437         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5438                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
5439         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5440                                  sw->recp_list, ICE_SW_LKUP_VLAN);
5441         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5442                                  sw->recp_list, ICE_SW_LKUP_DFLT);
5443         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5444                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5445         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5446                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5447         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5448                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5449 }
5450
5451 /**
5452  * ice_remove_vsi_fltr - Remove all filters for a VSI
5453  * @hw: pointer to the hardware structure
5454  * @vsi_handle: VSI handle to remove filters from
5455  */
5456 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5457 {
5458         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5459 }
5460
5461 /**
5462  * ice_alloc_res_cntr - allocating resource counter
5463  * @hw: pointer to the hardware structure
5464  * @type: type of resource
5465  * @alloc_shared: if set it is shared else dedicated
5466  * @num_items: number of entries requested for FD resource type
5467  * @counter_id: counter index returned by AQ call
5468  */
5469 enum ice_status
5470 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5471                    u16 *counter_id)
5472 {
5473         struct ice_aqc_alloc_free_res_elem *buf;
5474         enum ice_status status;
5475         u16 buf_len;
5476
5477         /* Allocate resource */
5478         buf_len = ice_struct_size(buf, elem, 1);
5479         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5480         if (!buf)
5481                 return ICE_ERR_NO_MEMORY;
5482
5483         buf->num_elems = CPU_TO_LE16(num_items);
5484         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5485                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5486
5487         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5488                                        ice_aqc_opc_alloc_res, NULL);
5489         if (status)
5490                 goto exit;
5491
5492         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5493
5494 exit:
5495         ice_free(hw, buf);
5496         return status;
5497 }
5498
5499 /**
5500  * ice_free_res_cntr - free resource counter
5501  * @hw: pointer to the hardware structure
5502  * @type: type of resource
5503  * @alloc_shared: if set it is shared else dedicated
5504  * @num_items: number of entries to be freed for FD resource type
5505  * @counter_id: counter ID resource which needs to be freed
5506  */
5507 enum ice_status
5508 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5509                   u16 counter_id)
5510 {
5511         struct ice_aqc_alloc_free_res_elem *buf;
5512         enum ice_status status;
5513         u16 buf_len;
5514
5515         /* Free resource */
5516         buf_len = ice_struct_size(buf, elem, 1);
5517         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5518         if (!buf)
5519                 return ICE_ERR_NO_MEMORY;
5520
5521         buf->num_elems = CPU_TO_LE16(num_items);
5522         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5523                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5524         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5525
5526         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5527                                        ice_aqc_opc_free_res, NULL);
5528         if (status)
5529                 ice_debug(hw, ICE_DBG_SW,
5530                           "counter resource could not be freed\n");
5531
5532         ice_free(hw, buf);
5533         return status;
5534 }
5535
5536 /**
5537  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5538  * @hw: pointer to the hardware structure
5539  * @counter_id: returns counter index
5540  */
5541 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5542 {
5543         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5544                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5545                                   counter_id);
5546 }
5547
5548 /**
5549  * ice_free_vlan_res_counter - Free counter resource for VLAN type
5550  * @hw: pointer to the hardware structure
5551  * @counter_id: counter index to be freed
5552  */
5553 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5554 {
5555         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5556                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5557                                  counter_id);
5558 }
5559
5560 /**
5561  * ice_alloc_res_lg_act - add large action resource
5562  * @hw: pointer to the hardware structure
5563  * @l_id: large action ID to fill it in
5564  * @num_acts: number of actions to hold with a large action entry
5565  */
5566 static enum ice_status
5567 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5568 {
5569         struct ice_aqc_alloc_free_res_elem *sw_buf;
5570         enum ice_status status;
5571         u16 buf_len;
5572
5573         if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5574                 return ICE_ERR_PARAM;
5575
5576         /* Allocate resource for large action */
5577         buf_len = ice_struct_size(sw_buf, elem, 1);
5578         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5579         if (!sw_buf)
5580                 return ICE_ERR_NO_MEMORY;
5581
5582         sw_buf->num_elems = CPU_TO_LE16(1);
5583
5584         /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5585          * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5586          * If num_acts is greater than 2, then use
5587          * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5588          * The num_acts cannot exceed 4. This was ensured at the
5589          * beginning of the function.
5590          */
5591         if (num_acts == 1)
5592                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5593         else if (num_acts == 2)
5594                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5595         else
5596                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5597
5598         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5599                                        ice_aqc_opc_alloc_res, NULL);
5600         if (!status)
5601                 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5602
5603         ice_free(hw, sw_buf);
5604         return status;
5605 }
5606
5607 /**
5608  * ice_add_mac_with_sw_marker - add filter with sw marker
5609  * @hw: pointer to the hardware structure
5610  * @f_info: filter info structure containing the MAC filter information
5611  * @sw_marker: sw marker to tag the Rx descriptor with
5612  */
5613 enum ice_status
5614 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5615                            u16 sw_marker)
5616 {
5617         struct ice_fltr_mgmt_list_entry *m_entry;
5618         struct ice_fltr_list_entry fl_info;
5619         struct ice_sw_recipe *recp_list;
5620         struct LIST_HEAD_TYPE l_head;
5621         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5622         enum ice_status ret;
5623         bool entry_exists;
5624         u16 lg_act_id;
5625
5626         if (f_info->fltr_act != ICE_FWD_TO_VSI)
5627                 return ICE_ERR_PARAM;
5628
5629         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5630                 return ICE_ERR_PARAM;
5631
5632         if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5633                 return ICE_ERR_PARAM;
5634
5635         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5636                 return ICE_ERR_PARAM;
5637         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5638
5639         /* Add filter if it doesn't exist so then the adding of large
5640          * action always results in update
5641          */
5642
5643         INIT_LIST_HEAD(&l_head);
5644         fl_info.fltr_info = *f_info;
5645         LIST_ADD(&fl_info.list_entry, &l_head);
5646
5647         entry_exists = false;
5648         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5649                                hw->port_info->lport);
5650         if (ret == ICE_ERR_ALREADY_EXISTS)
5651                 entry_exists = true;
5652         else if (ret)
5653                 return ret;
5654
5655         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5656         rule_lock = &recp_list->filt_rule_lock;
5657         ice_acquire_lock(rule_lock);
5658         /* Get the book keeping entry for the filter */
5659         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5660         if (!m_entry)
5661                 goto exit_error;
5662
5663         /* If counter action was enabled for this rule then don't enable
5664          * sw marker large action
5665          */
5666         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5667                 ret = ICE_ERR_PARAM;
5668                 goto exit_error;
5669         }
5670
5671         /* if same marker was added before */
5672         if (m_entry->sw_marker_id == sw_marker) {
5673                 ret = ICE_ERR_ALREADY_EXISTS;
5674                 goto exit_error;
5675         }
5676
5677         /* Allocate a hardware table entry to hold large act. Three actions
5678          * for marker based large action
5679          */
5680         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5681         if (ret)
5682                 goto exit_error;
5683
5684         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5685                 goto exit_error;
5686
5687         /* Update the switch rule to add the marker action */
5688         ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5689         if (!ret) {
5690                 ice_release_lock(rule_lock);
5691                 return ret;
5692         }
5693
5694 exit_error:
5695         ice_release_lock(rule_lock);
5696         /* only remove entry if it did not exist previously */
5697         if (!entry_exists)
5698                 ret = ice_remove_mac(hw, &l_head);
5699
5700         return ret;
5701 }
5702
5703 /**
5704  * ice_add_mac_with_counter - add filter with counter enabled
5705  * @hw: pointer to the hardware structure
5706  * @f_info: pointer to filter info structure containing the MAC filter
5707  *          information
5708  */
5709 enum ice_status
5710 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5711 {
5712         struct ice_fltr_mgmt_list_entry *m_entry;
5713         struct ice_fltr_list_entry fl_info;
5714         struct ice_sw_recipe *recp_list;
5715         struct LIST_HEAD_TYPE l_head;
5716         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5717         enum ice_status ret;
5718         bool entry_exist;
5719         u16 counter_id;
5720         u16 lg_act_id;
5721
5722         if (f_info->fltr_act != ICE_FWD_TO_VSI)
5723                 return ICE_ERR_PARAM;
5724
5725         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5726                 return ICE_ERR_PARAM;
5727
5728         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5729                 return ICE_ERR_PARAM;
5730         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5731         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5732
5733         entry_exist = false;
5734
5735         rule_lock = &recp_list->filt_rule_lock;
5736
5737         /* Add filter if it doesn't exist so then the adding of large
5738          * action always results in update
5739          */
5740         INIT_LIST_HEAD(&l_head);
5741
5742         fl_info.fltr_info = *f_info;
5743         LIST_ADD(&fl_info.list_entry, &l_head);
5744
5745         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5746                                hw->port_info->lport);
5747         if (ret == ICE_ERR_ALREADY_EXISTS)
5748                 entry_exist = true;
5749         else if (ret)
5750                 return ret;
5751
5752         ice_acquire_lock(rule_lock);
5753         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5754         if (!m_entry) {
5755                 ret = ICE_ERR_BAD_PTR;
5756                 goto exit_error;
5757         }
5758
5759         /* Don't enable counter for a filter for which sw marker was enabled */
5760         if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5761                 ret = ICE_ERR_PARAM;
5762                 goto exit_error;
5763         }
5764
5765         /* If a counter was already enabled then don't need to add again */
5766         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5767                 ret = ICE_ERR_ALREADY_EXISTS;
5768                 goto exit_error;
5769         }
5770
5771         /* Allocate a hardware table entry to VLAN counter */
5772         ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5773         if (ret)
5774                 goto exit_error;
5775
5776         /* Allocate a hardware table entry to hold large act. Two actions for
5777          * counter based large action
5778          */
5779         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5780         if (ret)
5781                 goto exit_error;
5782
5783         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5784                 goto exit_error;
5785
5786         /* Update the switch rule to add the counter action */
5787         ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5788         if (!ret) {
5789                 ice_release_lock(rule_lock);
5790                 return ret;
5791         }
5792
5793 exit_error:
5794         ice_release_lock(rule_lock);
5795         /* only remove entry if it did not exist previously */
5796         if (!entry_exist)
5797                 ret = ice_remove_mac(hw, &l_head);
5798
5799         return ret;
5800 }
5801
/* Mapping table that maps every 16-bit word within a given protocol
 * structure to its real byte offset as per the specification of that
 * protocol header.
 * For example, the destination MAC address is 3 words covering byte
 * offsets 0, 2, and 4 of the Ethernet header, and the source MAC address
 * words start at offsets 6, 8, and 10 (see the ICE_MAC_OFOS entry below).
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
};
5839
/* Mapping from software protocol type (enum ice_protocol_type) to the
 * hardware protocol ID used in recipe programming. Note that several
 * tunnel/encapsulation types share an underlying L4 hardware protocol ID
 * (e.g. VXLAN, GENEVE and GTP all map to ICE_UDP_OF_HW).
 * NOTE(review): the previous comment here described "preferred grouping of
 * recipes", which does not match this table's contents.
 */
static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};
5872
5873 /**
5874  * ice_find_recp - find a recipe
5875  * @hw: pointer to the hardware structure
5876  * @lkup_exts: extension sequence to match
5877  *
5878  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5879  */
5880 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5881                          enum ice_sw_tunnel_type tun_type)
5882 {
5883         bool refresh_required = true;
5884         struct ice_sw_recipe *recp;
5885         u8 i;
5886
5887         /* Walk through existing recipes to find a match */
5888         recp = hw->switch_info->recp_list;
5889         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5890                 /* If recipe was not created for this ID, in SW bookkeeping,
5891                  * check if FW has an entry for this recipe. If the FW has an
5892                  * entry update it in our SW bookkeeping and continue with the
5893                  * matching.
5894                  */
5895                 if (!recp[i].recp_created)
5896                         if (ice_get_recp_frm_fw(hw,
5897                                                 hw->switch_info->recp_list, i,
5898                                                 &refresh_required))
5899                                 continue;
5900
5901                 /* Skip inverse action recipes */
5902                 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5903                     ICE_AQ_RECIPE_ACT_INV_ACT)
5904                         continue;
5905
5906                 /* if number of words we are looking for match */
5907                 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5908                         struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5909                         struct ice_fv_word *be = lkup_exts->fv_words;
5910                         u16 *cr = recp[i].lkup_exts.field_mask;
5911                         u16 *de = lkup_exts->field_mask;
5912                         bool found = true;
5913                         u8 pe, qr;
5914
5915                         /* ar, cr, and qr are related to the recipe words, while
5916                          * be, de, and pe are related to the lookup words
5917                          */
5918                         for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5919                                 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5920                                      qr++) {
5921                                         if (ar[qr].off == be[pe].off &&
5922                                             ar[qr].prot_id == be[pe].prot_id &&
5923                                             cr[qr] == de[pe])
5924                                                 /* Found the "pe"th word in the
5925                                                  * given recipe
5926                                                  */
5927                                                 break;
5928                                 }
5929                                 /* After walking through all the words in the
5930                                  * "i"th recipe if "p"th word was not found then
5931                                  * this recipe is not what we are looking for.
5932                                  * So break out from this loop and try the next
5933                                  * recipe
5934                                  */
5935                                 if (qr >= recp[i].lkup_exts.n_val_words) {
5936                                         found = false;
5937                                         break;
5938                                 }
5939                         }
5940                         /* If for "i"th recipe the found was never set to false
5941                          * then it means we found our match
5942                          */
5943                         if (tun_type == recp[i].tun_type && found)
5944                                 return i; /* Return the recipe ID */
5945                 }
5946         }
5947         return ICE_MAX_NUM_RECIPES;
5948 }
5949
5950 /**
5951  * ice_prot_type_to_id - get protocol ID from protocol type
5952  * @type: protocol type
5953  * @id: pointer to variable that will receive the ID
5954  *
5955  * Returns true if found, false otherwise
5956  */
5957 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5958 {
5959         u8 i;
5960
5961         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5962                 if (ice_prot_id_tbl[i].type == type) {
5963                         *id = ice_prot_id_tbl[i].protocol_id;
5964                         return true;
5965                 }
5966         return false;
5967 }
5968
5969 /**
5970  * ice_find_valid_words - count valid words
5971  * @rule: advanced rule with lookup information
5972  * @lkup_exts: byte offset extractions of the words that are valid
5973  *
5974  * calculate valid words in a lookup rule using mask value
5975  */
5976 static u8
5977 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5978                      struct ice_prot_lkup_ext *lkup_exts)
5979 {
5980         u8 j, word, prot_id, ret_val;
5981
5982         if (!ice_prot_type_to_id(rule->type, &prot_id))
5983                 return 0;
5984
5985         word = lkup_exts->n_val_words;
5986
5987         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5988                 if (((u16 *)&rule->m_u)[j] &&
5989                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
5990                         /* No more space to accommodate */
5991                         if (word >= ICE_MAX_CHAIN_WORDS)
5992                                 return 0;
5993                         lkup_exts->fv_words[word].off =
5994                                 ice_prot_ext[rule->type].offs[j];
5995                         lkup_exts->fv_words[word].prot_id =
5996                                 ice_prot_id_tbl[rule->type].protocol_id;
5997                         lkup_exts->field_mask[word] =
5998                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
5999                         word++;
6000                 }
6001
6002         ret_val = word - lkup_exts->n_val_words;
6003         lkup_exts->n_val_words = word;
6004
6005         return ret_val;
6006 }
6007
6008 /**
6009  * ice_create_first_fit_recp_def - Create a recipe grouping
6010  * @hw: pointer to the hardware structure
6011  * @lkup_exts: an array of protocol header extractions
6012  * @rg_list: pointer to a list that stores new recipe groups
6013  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6014  *
6015  * Using first fit algorithm, take all the words that are still not done
6016  * and start grouping them in 4-word groups. Each group makes up one
6017  * recipe.
6018  */
6019 static enum ice_status
6020 ice_create_first_fit_recp_def(struct ice_hw *hw,
6021                               struct ice_prot_lkup_ext *lkup_exts,
6022                               struct LIST_HEAD_TYPE *rg_list,
6023                               u8 *recp_cnt)
6024 {
6025         struct ice_pref_recipe_group *grp = NULL;
6026         u8 j;
6027
6028         *recp_cnt = 0;
6029
6030         if (!lkup_exts->n_val_words) {
6031                 struct ice_recp_grp_entry *entry;
6032
6033                 entry = (struct ice_recp_grp_entry *)
6034                         ice_malloc(hw, sizeof(*entry));
6035                 if (!entry)
6036                         return ICE_ERR_NO_MEMORY;
6037                 LIST_ADD(&entry->l_entry, rg_list);
6038                 grp = &entry->r_group;
6039                 (*recp_cnt)++;
6040                 grp->n_val_pairs = 0;
6041         }
6042
6043         /* Walk through every word in the rule to check if it is not done. If so
6044          * then this word needs to be part of a new recipe.
6045          */
6046         for (j = 0; j < lkup_exts->n_val_words; j++)
6047                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6048                         if (!grp ||
6049                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6050                                 struct ice_recp_grp_entry *entry;
6051
6052                                 entry = (struct ice_recp_grp_entry *)
6053                                         ice_malloc(hw, sizeof(*entry));
6054                                 if (!entry)
6055                                         return ICE_ERR_NO_MEMORY;
6056                                 LIST_ADD(&entry->l_entry, rg_list);
6057                                 grp = &entry->r_group;
6058                                 (*recp_cnt)++;
6059                         }
6060
6061                         grp->pairs[grp->n_val_pairs].prot_id =
6062                                 lkup_exts->fv_words[j].prot_id;
6063                         grp->pairs[grp->n_val_pairs].off =
6064                                 lkup_exts->fv_words[j].off;
6065                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6066                         grp->n_val_pairs++;
6067                 }
6068
6069         return ICE_SUCCESS;
6070 }
6071
6072 /**
6073  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6074  * @hw: pointer to the hardware structure
6075  * @fv_list: field vector with the extraction sequence information
6076  * @rg_list: recipe groupings with protocol-offset pairs
6077  *
6078  * Helper function to fill in the field vector indices for protocol-offset
6079  * pairs. These indexes are then ultimately programmed into a recipe.
6080  */
6081 static enum ice_status
6082 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6083                        struct LIST_HEAD_TYPE *rg_list)
6084 {
6085         struct ice_sw_fv_list_entry *fv;
6086         struct ice_recp_grp_entry *rg;
6087         struct ice_fv_word *fv_ext;
6088
6089         if (LIST_EMPTY(fv_list))
6090                 return ICE_SUCCESS;
6091
6092         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6093         fv_ext = fv->fv_ptr->ew;
6094
6095         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6096                 u8 i;
6097
6098                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6099                         struct ice_fv_word *pr;
6100                         bool found = false;
6101                         u16 mask;
6102                         u8 j;
6103
6104                         pr = &rg->r_group.pairs[i];
6105                         mask = rg->r_group.mask[i];
6106
6107                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6108                                 if (fv_ext[j].prot_id == pr->prot_id &&
6109                                     fv_ext[j].off == pr->off) {
6110                                         found = true;
6111
6112                                         /* Store index of field vector */
6113                                         rg->fv_idx[i] = j;
6114                                         rg->fv_mask[i] = mask;
6115                                         break;
6116                                 }
6117
6118                         /* Protocol/offset could not be found, caller gave an
6119                          * invalid pair
6120                          */
6121                         if (!found)
6122                                 return ICE_ERR_PARAM;
6123                 }
6124         }
6125
6126         return ICE_SUCCESS;
6127 }
6128
6129 /**
6130  * ice_find_free_recp_res_idx - find free result indexes for recipe
6131  * @hw: pointer to hardware structure
6132  * @profiles: bitmap of profiles that will be associated with the new recipe
6133  * @free_idx: pointer to variable to receive the free index bitmap
6134  *
6135  * The algorithm used here is:
6136  *      1. When creating a new recipe, create a set P which contains all
6137  *         Profiles that will be associated with our new recipe
6138  *
6139  *      2. For each Profile p in set P:
6140  *          a. Add all recipes associated with Profile p into set R
6141  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6142  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6143  *              i. Or just assume they all have the same possible indexes:
6144  *                      44, 45, 46, 47
6145  *                      i.e., PossibleIndexes = 0x0000F00000000000
6146  *
6147  *      3. For each Recipe r in set R:
6148  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6149  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6150  *
6151  *      FreeIndexes will contain the bits indicating the indexes free for use,
6152  *      then the code needs to update the recipe[r].used_result_idx_bits to
6153  *      indicate which indexes were selected for use by this recipe.
6154  */
6155 static u16
6156 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6157                            ice_bitmap_t *free_idx)
6158 {
6159         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6160         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6161         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6162         u16 bit;
6163
6164         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6165         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6166         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6167         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6168
6169         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6170
6171         /* For each profile we are going to associate the recipe with, add the
6172          * recipes that are associated with that profile. This will give us
6173          * the set of recipes that our recipe may collide with. Also, determine
6174          * what possible result indexes are usable given this set of profiles.
6175          */
6176         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6177                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6178                               ICE_MAX_NUM_RECIPES);
6179                 ice_and_bitmap(possible_idx, possible_idx,
6180                                hw->switch_info->prof_res_bm[bit],
6181                                ICE_MAX_FV_WORDS);
6182         }
6183
6184         /* For each recipe that our new recipe may collide with, determine
6185          * which indexes have been used.
6186          */
6187         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6188                 ice_or_bitmap(used_idx, used_idx,
6189                               hw->switch_info->recp_list[bit].res_idxs,
6190                               ICE_MAX_FV_WORDS);
6191
6192         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6193
6194         /* return number of free indexes */
6195         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6196 }
6197
6198 /**
6199  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6200  * @hw: pointer to hardware structure
6201  * @rm: recipe management list entry
6202  * @profiles: bitmap of profiles that will be associated.
6203  */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* Account for the extra chaining recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	/* tmp receives the recipes currently programmed in FW; tmp[0] is then
	 * used below as a template when building each new recipe entry.
	 */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	/* buf is the AQ command buffer holding the recipes to be added */
	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	/* chain_idx is the first free result index available for chaining */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the field vector indices/masks resolved earlier by
		 * ice_fill_fv_word_index(); index 0 is reserved for switch ID.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW,
					  "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* Each chained recipe publishes its match result in a
			 * dedicated result index consumed by the root recipe.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single recipe: it is the root recipe itself */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The root matches on the result index each chained recipe
		 * wrote its outcome to (full 16-bit mask).
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		/* once on rg_list, last_chain_entry is freed by the caller's
		 * normal rg_list teardown
		 */
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* success: buf ownership transfers to rm (freed via rm->root_buf) */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

	/* NOTE(review): buf is NULL when reached via err_mem — assumes
	 * ice_free() tolerates a NULL pointer; confirm in osdep layer.
	 */
err_unroll:
err_mem:
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
6498
6499 /**
6500  * ice_create_recipe_group - creates recipe group
6501  * @hw: pointer to hardware structure
6502  * @rm: recipe management list entry
6503  * @lkup_exts: lookup elements
6504  */
6505 static enum ice_status
6506 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6507                         struct ice_prot_lkup_ext *lkup_exts)
6508 {
6509         enum ice_status status;
6510         u8 recp_count = 0;
6511
6512         rm->n_grp_count = 0;
6513
6514         /* Create recipes for words that are marked not done by packing them
6515          * as best fit.
6516          */
6517         status = ice_create_first_fit_recp_def(hw, lkup_exts,
6518                                                &rm->rg_list, &recp_count);
6519         if (!status) {
6520                 rm->n_grp_count += recp_count;
6521                 rm->n_ext_words = lkup_exts->n_val_words;
6522                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6523                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6524                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6525                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6526         }
6527
6528         return status;
6529 }
6530
6531 /**
6532  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6533  * @hw: pointer to hardware structure
6534  * @lkups: lookup elements or match criteria for the advanced recipe, one
6535  *         structure per protocol header
6536  * @lkups_cnt: number of protocols
6537  * @bm: bitmap of field vectors to consider
6538  * @fv_list: pointer to a list that holds the returned field vectors
6539  */
6540 static enum ice_status
6541 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6542            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6543 {
6544         enum ice_status status;
6545         u8 *prot_ids;
6546         u16 i;
6547
6548         if (!lkups_cnt)
6549                 return ICE_SUCCESS;
6550
6551         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6552         if (!prot_ids)
6553                 return ICE_ERR_NO_MEMORY;
6554
6555         for (i = 0; i < lkups_cnt; i++)
6556                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6557                         status = ICE_ERR_CFG;
6558                         goto free_mem;
6559                 }
6560
6561         /* Find field vectors that include all specified protocol types */
6562         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6563
6564 free_mem:
6565         ice_free(hw, prot_ids);
6566         return status;
6567 }
6568
6569 /**
6570  * ice_tun_type_match_mask - determine if tun type needs a match mask
6571  * @tun_type: tunnel type
6572  * @mask: mask to be used for the tunnel
6573  */
6574 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6575 {
6576         switch (tun_type) {
6577         case ICE_SW_TUN_VXLAN_GPE:
6578         case ICE_SW_TUN_GENEVE:
6579         case ICE_SW_TUN_VXLAN:
6580         case ICE_SW_TUN_NVGRE:
6581         case ICE_SW_TUN_UDP:
6582         case ICE_ALL_TUNNELS:
6583                 *mask = ICE_TUN_FLAG_MASK;
6584                 return true;
6585
6586         case ICE_SW_TUN_GENEVE_VLAN:
6587         case ICE_SW_TUN_VXLAN_VLAN:
6588                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6589                 return true;
6590
6591         default:
6592                 *mask = 0;
6593                 return false;
6594         }
6595 }
6596
6597 /**
6598  * ice_add_special_words - Add words that are not protocols, such as metadata
6599  * @rinfo: other information regarding the rule e.g. priority and action info
6600  * @lkup_exts: lookup word structure
6601  */
6602 static enum ice_status
6603 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6604                       struct ice_prot_lkup_ext *lkup_exts)
6605 {
6606         u16 mask;
6607
6608         /* If this is a tunneled packet, then add recipe index to match the
6609          * tunnel bit in the packet metadata flags.
6610          */
6611         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6612                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6613                         u8 word = lkup_exts->n_val_words++;
6614
6615                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6616                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6617                         lkup_exts->field_mask[word] = mask;
6618                 } else {
6619                         return ICE_ERR_MAX_LIMIT;
6620                 }
6621         }
6622
6623         return ICE_SUCCESS;
6624 }
6625
/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	/* Two strategies: either map the tunnel type to a profile *class*
	 * and fall through to ice_get_sw_fv_bitmap(), or set specific
	 * profile ID bits directly and return early.
	 */
	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* PPPoE payload rules select exact profile IDs */
	case ICE_SW_TUN_PPPOE_PAY:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	/* ESP/AH/L2TPv3/NAT-T: profile-hit rules share the same profile IDs
	 * as their field-match counterparts
	 */
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	/* GTP-U inner IP rules cover both the EH and non-EH profile
	 * variants for OTHER/UDP/TCP inner protocols
	 */
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_AND_NON_TUN:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	/* Expand the profile class into the bitmap of matching profiles */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
6778
6779 /**
6780  * ice_is_prof_rule - determine if rule type is a profile rule
6781  * @type: the rule type
6782  *
6783  * if the rule type is a profile rule, that means that there no field value
6784  * match required, in this case just a profile hit is required.
6785  */
6786 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6787 {
6788         switch (type) {
6789         case ICE_SW_TUN_PROFID_IPV6_ESP:
6790         case ICE_SW_TUN_PROFID_IPV6_AH:
6791         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6792         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6793         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6794         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6795         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6796         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6797                 return true;
6798         default:
6799                 break;
6800         }
6801
6802         return false;
6803 }
6804
6805 /**
6806  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6807  * @hw: pointer to hardware structure
6808  * @lkups: lookup elements or match criteria for the advanced recipe, one
6809  *  structure per protocol header
6810  * @lkups_cnt: number of protocols
6811  * @rinfo: other information regarding the rule e.g. priority and action info
6812  * @rid: return the recipe ID of the recipe created
6813  */
6814 static enum ice_status
6815 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6816                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6817 {
6818         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6819         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6820         struct ice_prot_lkup_ext *lkup_exts;
6821         struct ice_recp_grp_entry *r_entry;
6822         struct ice_sw_fv_list_entry *fvit;
6823         struct ice_recp_grp_entry *r_tmp;
6824         struct ice_sw_fv_list_entry *tmp;
6825         enum ice_status status = ICE_SUCCESS;
6826         struct ice_sw_recipe *rm;
6827         u8 i;
6828
6829         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6830                 return ICE_ERR_PARAM;
6831
6832         lkup_exts = (struct ice_prot_lkup_ext *)
6833                 ice_malloc(hw, sizeof(*lkup_exts));
6834         if (!lkup_exts)
6835                 return ICE_ERR_NO_MEMORY;
6836
6837         /* Determine the number of words to be matched and if it exceeds a
6838          * recipe's restrictions
6839          */
6840         for (i = 0; i < lkups_cnt; i++) {
6841                 u16 count;
6842
6843                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6844                         status = ICE_ERR_CFG;
6845                         goto err_free_lkup_exts;
6846                 }
6847
6848                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6849                 if (!count) {
6850                         status = ICE_ERR_CFG;
6851                         goto err_free_lkup_exts;
6852                 }
6853         }
6854
6855         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6856         if (!rm) {
6857                 status = ICE_ERR_NO_MEMORY;
6858                 goto err_free_lkup_exts;
6859         }
6860
6861         /* Get field vectors that contain fields extracted from all the protocol
6862          * headers being programmed.
6863          */
6864         INIT_LIST_HEAD(&rm->fv_list);
6865         INIT_LIST_HEAD(&rm->rg_list);
6866
6867         /* Get bitmap of field vectors (profiles) that are compatible with the
6868          * rule request; only these will be searched in the subsequent call to
6869          * ice_get_fv.
6870          */
6871         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6872
6873         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6874         if (status)
6875                 goto err_unroll;
6876
6877         /* Create any special protocol/offset pairs, such as looking at tunnel
6878          * bits by extracting metadata
6879          */
6880         status = ice_add_special_words(rinfo, lkup_exts);
6881         if (status)
6882                 goto err_free_lkup_exts;
6883
6884         /* Group match words into recipes using preferred recipe grouping
6885          * criteria.
6886          */
6887         status = ice_create_recipe_group(hw, rm, lkup_exts);
6888         if (status)
6889                 goto err_unroll;
6890
6891         /* set the recipe priority if specified */
6892         rm->priority = (u8)rinfo->priority;
6893
6894         /* Find offsets from the field vector. Pick the first one for all the
6895          * recipes.
6896          */
6897         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6898         if (status)
6899                 goto err_unroll;
6900
6901         /* An empty FV list means to use all the profiles returned in the
6902          * profile bitmap
6903          */
6904         if (LIST_EMPTY(&rm->fv_list)) {
6905                 u16 j;
6906
6907                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
6908                         struct ice_sw_fv_list_entry *fvl;
6909
6910                         fvl = (struct ice_sw_fv_list_entry *)
6911                                 ice_malloc(hw, sizeof(*fvl));
6912                         if (!fvl)
6913                                 goto err_unroll;
6914                         fvl->fv_ptr = NULL;
6915                         fvl->profile_id = j;
6916                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
6917                 }
6918         }
6919
6920         /* get bitmap of all profiles the recipe will be associated with */
6921         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6922         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6923                             list_entry) {
6924                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6925                 ice_set_bit((u16)fvit->profile_id, profiles);
6926         }
6927
6928         /* Look for a recipe which matches our requested fv / mask list */
6929         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6930         if (*rid < ICE_MAX_NUM_RECIPES)
6931                 /* Success if found a recipe that match the existing criteria */
6932                 goto err_unroll;
6933
6934         rm->tun_type = rinfo->tun_type;
6935         /* Recipe we need does not exist, add a recipe */
6936         status = ice_add_sw_recipe(hw, rm, profiles);
6937         if (status)
6938                 goto err_unroll;
6939
6940         /* Associate all the recipes created with all the profiles in the
6941          * common field vector.
6942          */
6943         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6944                             list_entry) {
6945                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6946                 u16 j;
6947
6948                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6949                                                       (u8 *)r_bitmap, NULL);
6950                 if (status)
6951                         goto err_unroll;
6952
6953                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6954                               ICE_MAX_NUM_RECIPES);
6955                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6956                 if (status)
6957                         goto err_unroll;
6958
6959                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6960                                                       (u8 *)r_bitmap,
6961                                                       NULL);
6962                 ice_release_change_lock(hw);
6963
6964                 if (status)
6965                         goto err_unroll;
6966
6967                 /* Update profile to recipe bitmap array */
6968                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6969                               ICE_MAX_NUM_RECIPES);
6970
6971                 /* Update recipe to profile bitmap array */
6972                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
6973                         ice_set_bit((u16)fvit->profile_id,
6974                                     recipe_to_profile[j]);
6975         }
6976
6977         *rid = rm->root_rid;
6978         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6979                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6980 err_unroll:
6981         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6982                                  ice_recp_grp_entry, l_entry) {
6983                 LIST_DEL(&r_entry->l_entry);
6984                 ice_free(hw, r_entry);
6985         }
6986
6987         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6988                                  list_entry) {
6989                 LIST_DEL(&fvit->list_entry);
6990                 ice_free(hw, fvit);
6991         }
6992
6993         if (rm->root_buf)
6994                 ice_free(hw, rm->root_buf);
6995
6996         ice_free(hw, rm);
6997
6998 err_free_lkup_exts:
6999         ice_free(hw, lkup_exts);
7000
7001         return status;
7002 }
7003
7004 /**
7005  * ice_find_dummy_packet - find dummy packet by tunnel type
7006  *
7007  * @lkups: lookup elements or match criteria for the advanced recipe, one
7008  *         structure per protocol header
7009  * @lkups_cnt: number of protocols
7010  * @tun_type: tunnel type from the match criteria
7011  * @pkt: dummy packet to fill according to filter match criteria
7012  * @pkt_len: packet length of dummy packet
7013  * @offsets: pointer to receive the pointer to the offsets for the packet
7014  */
7015 static void
7016 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7017                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7018                       u16 *pkt_len,
7019                       const struct ice_dummy_pkt_offsets **offsets)
7020 {
7021         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7022         bool gre = false;
7023         u16 i;
7024
7025         for (i = 0; i < lkups_cnt; i++) {
7026                 if (lkups[i].type == ICE_UDP_ILOS)
7027                         udp = true;
7028                 else if (lkups[i].type == ICE_TCP_IL)
7029                         tcp = true;
7030                 else if (lkups[i].type == ICE_IPV6_OFOS)
7031                         ipv6 = true;
7032                 else if (lkups[i].type == ICE_VLAN_OFOS)
7033                         vlan = true;
7034                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7035                          lkups[i].h_u.ipv4_hdr.protocol ==
7036                                 ICE_IPV4_NVGRE_PROTO_ID &&
7037                          lkups[i].m_u.ipv4_hdr.protocol ==
7038                                 0xFF)
7039                         gre = true;
7040                 else if (lkups[i].type == ICE_PPPOE &&
7041                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7042                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7043                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7044                                 0xFFFF)
7045                         ipv6 = true;
7046                 else if (lkups[i].type == ICE_ETYPE_OL &&
7047                          lkups[i].h_u.ethertype.ethtype_id ==
7048                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7049                          lkups[i].m_u.ethertype.ethtype_id ==
7050                                         0xFFFF)
7051                         ipv6 = true;
7052                 else if (lkups[i].type == ICE_IPV4_IL &&
7053                          lkups[i].h_u.ipv4_hdr.protocol ==
7054                                 ICE_TCP_PROTO_ID &&
7055                          lkups[i].m_u.ipv4_hdr.protocol ==
7056                                 0xFF)
7057                         tcp = true;
7058         }
7059
7060         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7061                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7062                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7063                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7064                 return;
7065         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7066                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7067                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7068                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7069                 return;
7070         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7071                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7072                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7073                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7074                 return;
7075         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7076                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7077                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7078                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7079                 return;
7080         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7081                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7082                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7083                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7084                 return;
7085         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7086                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7087                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7088                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7089                 return;
7090         }
7091
7092         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7093                 *pkt = dummy_ipv4_esp_pkt;
7094                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7095                 *offsets = dummy_ipv4_esp_packet_offsets;
7096                 return;
7097         }
7098
7099         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7100                 *pkt = dummy_ipv6_esp_pkt;
7101                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7102                 *offsets = dummy_ipv6_esp_packet_offsets;
7103                 return;
7104         }
7105
7106         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7107                 *pkt = dummy_ipv4_ah_pkt;
7108                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7109                 *offsets = dummy_ipv4_ah_packet_offsets;
7110                 return;
7111         }
7112
7113         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7114                 *pkt = dummy_ipv6_ah_pkt;
7115                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7116                 *offsets = dummy_ipv6_ah_packet_offsets;
7117                 return;
7118         }
7119
7120         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7121                 *pkt = dummy_ipv4_nat_pkt;
7122                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7123                 *offsets = dummy_ipv4_nat_packet_offsets;
7124                 return;
7125         }
7126
7127         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7128                 *pkt = dummy_ipv6_nat_pkt;
7129                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7130                 *offsets = dummy_ipv6_nat_packet_offsets;
7131                 return;
7132         }
7133
7134         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7135                 *pkt = dummy_ipv4_l2tpv3_pkt;
7136                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7137                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7138                 return;
7139         }
7140
7141         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7142                 *pkt = dummy_ipv6_l2tpv3_pkt;
7143                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7144                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7145                 return;
7146         }
7147
7148         if (tun_type == ICE_SW_TUN_GTP) {
7149                 *pkt = dummy_udp_gtp_packet;
7150                 *pkt_len = sizeof(dummy_udp_gtp_packet);
7151                 *offsets = dummy_udp_gtp_packet_offsets;
7152                 return;
7153         }
7154
7155         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7156                 *pkt = dummy_pppoe_ipv6_packet;
7157                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7158                 *offsets = dummy_pppoe_packet_offsets;
7159                 return;
7160         } else if (tun_type == ICE_SW_TUN_PPPOE ||
7161                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7162                 *pkt = dummy_pppoe_ipv4_packet;
7163                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7164                 *offsets = dummy_pppoe_packet_offsets;
7165                 return;
7166         }
7167
7168         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7169                 *pkt = dummy_pppoe_ipv4_packet;
7170                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7171                 *offsets = dummy_pppoe_packet_ipv4_offsets;
7172                 return;
7173         }
7174
7175         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7176                 *pkt = dummy_pppoe_ipv4_tcp_packet;
7177                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7178                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7179                 return;
7180         }
7181
7182         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7183                 *pkt = dummy_pppoe_ipv4_udp_packet;
7184                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7185                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7186                 return;
7187         }
7188
7189         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7190                 *pkt = dummy_pppoe_ipv6_packet;
7191                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7192                 *offsets = dummy_pppoe_packet_ipv6_offsets;
7193                 return;
7194         }
7195
7196         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7197                 *pkt = dummy_pppoe_ipv6_tcp_packet;
7198                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7199                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7200                 return;
7201         }
7202
7203         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7204                 *pkt = dummy_pppoe_ipv6_udp_packet;
7205                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7206                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7207                 return;
7208         }
7209
7210         if (tun_type == ICE_SW_IPV4_TCP) {
7211                 *pkt = dummy_tcp_packet;
7212                 *pkt_len = sizeof(dummy_tcp_packet);
7213                 *offsets = dummy_tcp_packet_offsets;
7214                 return;
7215         }
7216
7217         if (tun_type == ICE_SW_IPV4_UDP) {
7218                 *pkt = dummy_udp_packet;
7219                 *pkt_len = sizeof(dummy_udp_packet);
7220                 *offsets = dummy_udp_packet_offsets;
7221                 return;
7222         }
7223
7224         if (tun_type == ICE_SW_IPV6_TCP) {
7225                 *pkt = dummy_tcp_ipv6_packet;
7226                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7227                 *offsets = dummy_tcp_ipv6_packet_offsets;
7228                 return;
7229         }
7230
7231         if (tun_type == ICE_SW_IPV6_UDP) {
7232                 *pkt = dummy_udp_ipv6_packet;
7233                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7234                 *offsets = dummy_udp_ipv6_packet_offsets;
7235                 return;
7236         }
7237
7238         if (tun_type == ICE_ALL_TUNNELS) {
7239                 *pkt = dummy_gre_udp_packet;
7240                 *pkt_len = sizeof(dummy_gre_udp_packet);
7241                 *offsets = dummy_gre_udp_packet_offsets;
7242                 return;
7243         }
7244
7245         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7246                 if (tcp) {
7247                         *pkt = dummy_gre_tcp_packet;
7248                         *pkt_len = sizeof(dummy_gre_tcp_packet);
7249                         *offsets = dummy_gre_tcp_packet_offsets;
7250                         return;
7251                 }
7252
7253                 *pkt = dummy_gre_udp_packet;
7254                 *pkt_len = sizeof(dummy_gre_udp_packet);
7255                 *offsets = dummy_gre_udp_packet_offsets;
7256                 return;
7257         }
7258
7259         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7260             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7261             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7262             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7263                 if (tcp) {
7264                         *pkt = dummy_udp_tun_tcp_packet;
7265                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7266                         *offsets = dummy_udp_tun_tcp_packet_offsets;
7267                         return;
7268                 }
7269
7270                 *pkt = dummy_udp_tun_udp_packet;
7271                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7272                 *offsets = dummy_udp_tun_udp_packet_offsets;
7273                 return;
7274         }
7275
7276         if (udp && !ipv6) {
7277                 if (vlan) {
7278                         *pkt = dummy_vlan_udp_packet;
7279                         *pkt_len = sizeof(dummy_vlan_udp_packet);
7280                         *offsets = dummy_vlan_udp_packet_offsets;
7281                         return;
7282                 }
7283                 *pkt = dummy_udp_packet;
7284                 *pkt_len = sizeof(dummy_udp_packet);
7285                 *offsets = dummy_udp_packet_offsets;
7286                 return;
7287         } else if (udp && ipv6) {
7288                 if (vlan) {
7289                         *pkt = dummy_vlan_udp_ipv6_packet;
7290                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7291                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7292                         return;
7293                 }
7294                 *pkt = dummy_udp_ipv6_packet;
7295                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7296                 *offsets = dummy_udp_ipv6_packet_offsets;
7297                 return;
7298         } else if ((tcp && ipv6) || ipv6) {
7299                 if (vlan) {
7300                         *pkt = dummy_vlan_tcp_ipv6_packet;
7301                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7302                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7303                         return;
7304                 }
7305                 *pkt = dummy_tcp_ipv6_packet;
7306                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7307                 *offsets = dummy_tcp_ipv6_packet_offsets;
7308                 return;
7309         }
7310
7311         if (vlan) {
7312                 *pkt = dummy_vlan_tcp_packet;
7313                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7314                 *offsets = dummy_vlan_tcp_packet_offsets;
7315         } else {
7316                 *pkt = dummy_tcp_packet;
7317                 *pkt_len = sizeof(dummy_tcp_packet);
7318                 *offsets = dummy_tcp_packet_offsets;
7319         }
7320 }
7321
/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *         structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 *
 * Copies the dummy packet template into the switch rule buffer, then
 * overwrites only the 16-bit words selected by each lookup's mask with the
 * corresponding header values. Returns ICE_ERR_PARAM if a lookup's protocol
 * is absent from @offsets or unrecognized, ICE_ERR_CFG if a header length is
 * not word-aligned, ICE_SUCCESS otherwise.
 */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* Map the protocol type to its header length in bytes, which
		 * bounds the masked write below.
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		case ICE_NVGRE:
			len = sizeof(struct ice_nvgre);
			break;
		case ICE_VXLAN:
		case ICE_GENEVE:
		case ICE_VXLAN_GPE:
			len = sizeof(struct ice_udp_tnl_hdr);
			break;

		case ICE_GTP:
		case ICE_GTP_NO_PAY:
			len = sizeof(struct ice_udp_gtp_hdr);
			break;
		case ICE_PPPOE:
			len = sizeof(struct ice_pppoe_hdr);
			break;
		case ICE_ESP:
			len = sizeof(struct ice_esp_hdr);
			break;
		case ICE_NAT_T:
			len = sizeof(struct ice_nat_t_hdr);
			break;
		case ICE_AH:
			len = sizeof(struct ice_ah_hdr);
			break;
		case ICE_L2TPV3:
			len = sizeof(struct ice_l2tpv3_sess_hdr);
			break;
		default:
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		/* For each 16-bit word with a non-zero mask: keep the dummy
		 * packet's bits outside the mask and merge in the caller's
		 * header bits inside the mask.
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	/* Record the final header length in the rule (little-endian) */
	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);

	return ICE_SUCCESS;
}
7452
7453 /**
7454  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7455  * @hw: pointer to the hardware structure
7456  * @tun_type: tunnel type
7457  * @pkt: dummy packet to fill in
7458  * @offsets: offset info for the dummy packet
7459  */
7460 static enum ice_status
7461 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7462                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7463 {
7464         u16 open_port, i;
7465
7466         switch (tun_type) {
7467         case ICE_SW_TUN_AND_NON_TUN:
7468         case ICE_SW_TUN_VXLAN_GPE:
7469         case ICE_SW_TUN_VXLAN:
7470         case ICE_SW_TUN_VXLAN_VLAN:
7471         case ICE_SW_TUN_UDP:
7472                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7473                         return ICE_ERR_CFG;
7474                 break;
7475
7476         case ICE_SW_TUN_GENEVE:
7477         case ICE_SW_TUN_GENEVE_VLAN:
7478                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7479                         return ICE_ERR_CFG;
7480                 break;
7481
7482         default:
7483                 /* Nothing needs to be done for this tunnel type */
7484                 return ICE_SUCCESS;
7485         }
7486
7487         /* Find the outer UDP protocol header and insert the port number */
7488         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7489                 if (offsets[i].type == ICE_UDP_OF) {
7490                         struct ice_l4_hdr *hdr;
7491                         u16 offset;
7492
7493                         offset = offsets[i].offset;
7494                         hdr = (struct ice_l4_hdr *)&pkt[offset];
7495                         hdr->dst_port = CPU_TO_BE16(open_port);
7496
7497                         return ICE_SUCCESS;
7498                 }
7499         }
7500
7501         return ICE_ERR_CFG;
7502 }
7503
7504 /**
7505  * ice_find_adv_rule_entry - Search a rule entry
7506  * @hw: pointer to the hardware structure
7507  * @lkups: lookup elements or match criteria for the advanced recipe, one
7508  *         structure per protocol header
7509  * @lkups_cnt: number of protocols
7510  * @recp_id: recipe ID for which we are finding the rule
7511  * @rinfo: other information regarding the rule e.g. priority and action info
7512  *
7513  * Helper function to search for a given advance rule entry
7514  * Returns pointer to entry storing the rule if found
7515  */
7516 static struct ice_adv_fltr_mgmt_list_entry *
7517 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7518                         u16 lkups_cnt, u16 recp_id,
7519                         struct ice_adv_rule_info *rinfo)
7520 {
7521         struct ice_adv_fltr_mgmt_list_entry *list_itr;
7522         struct ice_switch_info *sw = hw->switch_info;
7523         int i;
7524
7525         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7526                             ice_adv_fltr_mgmt_list_entry, list_entry) {
7527                 bool lkups_matched = true;
7528
7529                 if (lkups_cnt != list_itr->lkups_cnt)
7530                         continue;
7531                 for (i = 0; i < list_itr->lkups_cnt; i++)
7532                         if (memcmp(&list_itr->lkups[i], &lkups[i],
7533                                    sizeof(*lkups))) {
7534                                 lkups_matched = false;
7535                                 break;
7536                         }
7537                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7538                     rinfo->tun_type == list_itr->rule_info.tun_type &&
7539                     lkups_matched)
7540                         return list_itr;
7541         }
7542         return NULL;
7543 }
7544
7545 /**
7546  * ice_adv_add_update_vsi_list
7547  * @hw: pointer to the hardware structure
7548  * @m_entry: pointer to current adv filter management list entry
7549  * @cur_fltr: filter information from the book keeping entry
7550  * @new_fltr: filter information with the new VSI to be added
7551  *
7552  * Call AQ command to add or update previously created VSI list with new VSI.
7553  *
7554  * Helper function to do book keeping associated with adding filter information
7555  * The algorithm to do the booking keeping is described below :
7556  * When a VSI needs to subscribe to a given advanced filter
7557  *      if only one VSI has been added till now
7558  *              Allocate a new VSI list and add two VSIs
7559  *              to this list using switch rule command
7560  *              Update the previously created switch rule with the
7561  *              newly created VSI list ID
7562  *      if a VSI list was previously created
7563  *              Add the new VSI to the previously created VSI list set
7564  *              using the update switch rule command
7565  */
7566 static enum ice_status
7567 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7568                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
7569                             struct ice_adv_rule_info *cur_fltr,
7570                             struct ice_adv_rule_info *new_fltr)
7571 {
7572         enum ice_status status;
7573         u16 vsi_list_id = 0;
7574
7575         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7576             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7577             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7578                 return ICE_ERR_NOT_IMPL;
7579
7580         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7581              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7582             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7583              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7584                 return ICE_ERR_NOT_IMPL;
7585
7586         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7587                  /* Only one entry existed in the mapping and it was not already
7588                   * a part of a VSI list. So, create a VSI list with the old and
7589                   * new VSIs.
7590                   */
7591                 struct ice_fltr_info tmp_fltr;
7592                 u16 vsi_handle_arr[2];
7593
7594                 /* A rule already exists with the new VSI being added */
7595                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7596                     new_fltr->sw_act.fwd_id.hw_vsi_id)
7597                         return ICE_ERR_ALREADY_EXISTS;
7598
7599                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7600                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7601                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7602                                                   &vsi_list_id,
7603                                                   ICE_SW_LKUP_LAST);
7604                 if (status)
7605                         return status;
7606
7607                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7608                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7609                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7610                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7611                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7612                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7613
7614                 /* Update the previous switch rule of "forward to VSI" to
7615                  * "fwd to VSI list"
7616                  */
7617                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7618                 if (status)
7619                         return status;
7620
7621                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7622                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7623                 m_entry->vsi_list_info =
7624                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7625                                                 vsi_list_id);
7626         } else {
7627                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7628
7629                 if (!m_entry->vsi_list_info)
7630                         return ICE_ERR_CFG;
7631
7632                 /* A rule already exists with the new VSI being added */
7633                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7634                         return ICE_SUCCESS;
7635
7636                 /* Update the previously created VSI list set with
7637                  * the new VSI ID passed in
7638                  */
7639                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7640
7641                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7642                                                   vsi_list_id, false,
7643                                                   ice_aqc_opc_update_sw_rules,
7644                                                   ICE_SW_LKUP_LAST);
7645                 /* update VSI list mapping info with new VSI ID */
7646                 if (!status)
7647                         ice_set_bit(vsi_handle,
7648                                     m_entry->vsi_list_info->vsi_map);
7649         }
7650         if (!status)
7651                 m_entry->vsi_count++;
7652         return status;
7653 }
7654
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	/* Profile-based (tunnel-type-only) rules may legitimately carry no
	 * lookup words; all other rules must have at least one.
	 */
	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* Count every non-zero 16-bit word in the mask; only masked
		 * words consume recipe match resources.
		 */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* Only the four explicit forwarding/drop actions are supported here */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the software VSI handle to the HW VSI number the switch
	 * block actually programs.
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* Rule buffer = fixed lookup header + the dummy packet payload */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	/* LAN_ENABLE lets matching packets also reach the LAN queues */
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue-group region size is encoded as log2 of qgrp_size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Copy the dummy packet into the rule and overwrite the fields the
	 * caller supplied lookup values for.
	 */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* Program the rule into HW via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	/* For a profile rule lkups_cnt may be 0, so a NULL copy is valid */
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* HW returns the assigned rule index in the add response */
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	/* NOTE(review): filt_rules is appended here without taking
	 * recp_list[rid].filt_rule_lock - presumably rule programming is
	 * single-threaded; confirm against callers.
	 */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* On failure undo the partial book-keeping allocation */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	ice_free(hw, s_rule);

	return status;
}
7882
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Unsubscribes one VSI from the advanced rule's VSI list. When only one
 * subscriber remains afterwards, the VSI list is dissolved and the rule is
 * converted back to a plain "forward to VSI" rule.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	/* NOTE(review): vsi_list_info is dereferenced here without a NULL
	 * check - presumably a FWD_TO_VSI_LIST entry always has one; verify
	 * against the add path.
	 */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Detach the departing VSI from the HW VSI list first */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		/* Only one subscriber left: collapse the VSI list back into a
		 * direct "forward to VSI" rule and free the list.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" back to
		 * "forward to VSI" for the remaining subscriber
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
7974
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Re-derive the lookup extraction words so the owning recipe can be
	 * located from the caller's protocol headers/masks.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the rule entry is looked up before rule_lock is
	 * acquired - presumably removal is serialized by the caller; confirm
	 * there is no concurrent add/remove on this recipe's list.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule is not shared via a VSI list: delete it outright */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Other VSIs still subscribe: only drop this VSI from the
		 * list and keep the rule.
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* No packet payload is needed to delete a rule by index */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule =
			(struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
								   rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* DOES_NOT_EXIST means HW already dropped it; still clean up
		 * the book-keeping entry.
		 */
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
8081
8082 /**
8083  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8084  * @hw: pointer to the hardware structure
8085  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8086  *
8087  * This function is used to remove 1 rule at a time. The removal is based on
8088  * the remove_entry parameter. This function will remove rule for a given
8089  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8090  */
8091 enum ice_status
8092 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8093                        struct ice_rule_query_data *remove_entry)
8094 {
8095         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8096         struct LIST_HEAD_TYPE *list_head;
8097         struct ice_adv_rule_info rinfo;
8098         struct ice_switch_info *sw;
8099
8100         sw = hw->switch_info;
8101         if (!sw->recp_list[remove_entry->rid].recp_created)
8102                 return ICE_ERR_PARAM;
8103         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8104         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8105                             list_entry) {
8106                 if (list_itr->rule_info.fltr_rule_id ==
8107                     remove_entry->rule_id) {
8108                         rinfo = list_itr->rule_info;
8109                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8110                         return ice_rem_adv_rule(hw, list_itr->lkups,
8111                                                 list_itr->lkups_cnt, &rinfo);
8112                 }
8113         }
8114         /* either list is empty or unable to find rule */
8115         return ICE_ERR_DOES_NOT_EXIST;
8116 }
8117
8118 /**
8119  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8120  *                       given VSI handle
8121  * @hw: pointer to the hardware structure
8122  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8123  *
8124  * This function is used to remove all the rules for a given VSI and as soon
8125  * as removing a rule fails, it will return immediately with the error code,
8126  * else it will return ICE_SUCCESS
8127  */
8128 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8129 {
8130         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8131         struct ice_vsi_list_map_info *map_info;
8132         struct LIST_HEAD_TYPE *list_head;
8133         struct ice_adv_rule_info rinfo;
8134         struct ice_switch_info *sw;
8135         enum ice_status status;
8136         u16 vsi_list_id = 0;
8137         u8 rid;
8138
8139         sw = hw->switch_info;
8140         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8141                 if (!sw->recp_list[rid].recp_created)
8142                         continue;
8143                 if (!sw->recp_list[rid].adv_rule)
8144                         continue;
8145                 list_head = &sw->recp_list[rid].filt_rules;
8146                 map_info = NULL;
8147                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
8148                                     ice_adv_fltr_mgmt_list_entry, list_entry) {
8149                         map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
8150                                                            vsi_handle,
8151                                                            &vsi_list_id);
8152                         if (!map_info)
8153                                 continue;
8154                         rinfo = list_itr->rule_info;
8155                         rinfo.sw_act.vsi_handle = vsi_handle;
8156                         status = ice_rem_adv_rule(hw, list_itr->lkups,
8157                                                   list_itr->lkups_cnt, &rinfo);
8158                         if (status)
8159                                 return status;
8160                         map_info = NULL;
8161                 }
8162         }
8163         return ICE_SUCCESS;
8164 }
8165
/**
 * ice_replay_fltr - Replay all the filters stored by a specific list head
 * @hw: pointer to the hardware structure
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters needs to be replayed
 *
 * Moves the stored filters to a temporary list, then re-adds each one through
 * the normal add path so HW and book-keeping are rebuilt from scratch.
 */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u8 lport = hw->port_info->lport;
	struct LIST_HEAD_TYPE l_head;

	if (LIST_EMPTY(list_head))
		return status;

	recp_list = &hw->switch_info->recp_list[recp_id];
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;
		u16 vsi_handle;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI non-VLAN filters replay directly with their
		 * stored filter info.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_list, lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		/* Each bit is cleared as it is replayed so the re-added rules
		 * rebuild their own VSI lists from scratch.
		 */
		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
				     ICE_MAX_VSI) {
			if (!ice_is_vsi_valid(hw, vsi_handle))
				break;

			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, recp_list,
							       &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_list,
							       lport,
							       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}
8235
8236 /**
8237  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8238  * @hw: pointer to the hardware structure
8239  *
8240  * NOTE: This function does not clean up partially added filters on error.
8241  * It is up to caller of the function to issue a reset or fail early.
8242  */
8243 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8244 {
8245         struct ice_switch_info *sw = hw->switch_info;
8246         enum ice_status status = ICE_SUCCESS;
8247         u8 i;
8248
8249         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8250                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8251
8252                 status = ice_replay_fltr(hw, i, head);
8253                 if (status != ICE_SUCCESS)
8254                         return status;
8255         }
8256         return status;
8257 }
8258
8259 /**
8260  * ice_replay_vsi_fltr - Replay filters for requested VSI
8261  * @hw: pointer to the hardware structure
8262  * @pi: pointer to port information structure
8263  * @sw: pointer to switch info struct for which function replays filters
8264  * @vsi_handle: driver VSI handle
8265  * @recp_id: Recipe ID for which rules need to be replayed
8266  * @list_head: list for which filters need to be replayed
8267  *
8268  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8269  * It is required to pass valid VSI handle.
8270  */
8271 static enum ice_status
8272 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8273                     struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8274                     struct LIST_HEAD_TYPE *list_head)
8275 {
8276         struct ice_fltr_mgmt_list_entry *itr;
8277         enum ice_status status = ICE_SUCCESS;
8278         struct ice_sw_recipe *recp_list;
8279         u16 hw_vsi_id;
8280
8281         if (LIST_EMPTY(list_head))
8282                 return status;
8283         recp_list = &sw->recp_list[recp_id];
8284         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8285
8286         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8287                             list_entry) {
8288                 struct ice_fltr_list_entry f_entry;
8289
8290                 f_entry.fltr_info = itr->fltr_info;
8291                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8292                     itr->fltr_info.vsi_handle == vsi_handle) {
8293                         /* update the src in case it is VSI num */
8294                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8295                                 f_entry.fltr_info.src = hw_vsi_id;
8296                         status = ice_add_rule_internal(hw, recp_list,
8297                                                        pi->lport,
8298                                                        &f_entry);
8299                         if (status != ICE_SUCCESS)
8300                                 goto end;
8301                         continue;
8302                 }
8303                 if (!itr->vsi_list_info ||
8304                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8305                         continue;
8306                 /* Clearing it so that the logic can add it back */
8307                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8308                 f_entry.fltr_info.vsi_handle = vsi_handle;
8309                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8310                 /* update the src in case it is VSI num */
8311                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8312                         f_entry.fltr_info.src = hw_vsi_id;
8313                 if (recp_id == ICE_SW_LKUP_VLAN)
8314                         status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8315                 else
8316                         status = ice_add_rule_internal(hw, recp_list,
8317                                                        pi->lport,
8318                                                        &f_entry);
8319                 if (status != ICE_SUCCESS)
8320                         goto end;
8321         }
8322 end:
8323         return status;
8324 }
8325
8326 /**
8327  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8328  * @hw: pointer to the hardware structure
8329  * @vsi_handle: driver VSI handle
8330  * @list_head: list for which filters need to be replayed
8331  *
8332  * Replay the advanced rule for the given VSI.
8333  */
8334 static enum ice_status
8335 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8336                         struct LIST_HEAD_TYPE *list_head)
8337 {
8338         struct ice_rule_query_data added_entry = { 0 };
8339         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8340         enum ice_status status = ICE_SUCCESS;
8341
8342         if (LIST_EMPTY(list_head))
8343                 return status;
8344         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8345                             list_entry) {
8346                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8347                 u16 lk_cnt = adv_fltr->lkups_cnt;
8348
8349                 if (vsi_handle != rinfo->sw_act.vsi_handle)
8350                         continue;
8351                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8352                                           &added_entry);
8353                 if (status)
8354                         break;
8355         }
8356         return status;
8357 }
8358
8359 /**
8360  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8361  * @hw: pointer to the hardware structure
8362  * @pi: pointer to port information structure
8363  * @vsi_handle: driver VSI handle
8364  *
8365  * Replays filters for requested VSI via vsi_handle.
8366  */
8367 enum ice_status
8368 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8369                         u16 vsi_handle)
8370 {
8371         struct ice_switch_info *sw = hw->switch_info;
8372         enum ice_status status;
8373         u8 i;
8374
8375         /* Update the recipes that were created */
8376         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8377                 struct LIST_HEAD_TYPE *head;
8378
8379                 head = &sw->recp_list[i].filt_replay_rules;
8380                 if (!sw->recp_list[i].adv_rule)
8381                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8382                                                      head);
8383                 else
8384                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8385                 if (status != ICE_SUCCESS)
8386                         return status;
8387         }
8388
8389         return ICE_SUCCESS;
8390 }
8391
8392 /**
8393  * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8394  * @hw: pointer to the HW struct
8395  * @sw: pointer to switch info struct for which function removes filters
8396  *
8397  * Deletes the filter replay rules for given switch
8398  */
8399 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8400 {
8401         u8 i;
8402
8403         if (!sw)
8404                 return;
8405
8406         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8407                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8408                         struct LIST_HEAD_TYPE *l_head;
8409
8410                         l_head = &sw->recp_list[i].filt_replay_rules;
8411                         if (!sw->recp_list[i].adv_rule)
8412                                 ice_rem_sw_rule_info(hw, l_head);
8413                         else
8414                                 ice_rem_adv_rule_info(hw, l_head);
8415                 }
8416         }
8417 }
8418
8419 /**
8420  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8421  * @hw: pointer to the HW struct
8422  *
8423  * Deletes the filter replay rules.
8424  */
8425 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8426 {
8427         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
8428 }