a82af6fa0b8ec09432374ee973a0677b9eecf424
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
9 #define ICE_ETH_DA_OFFSET               0
10 #define ICE_ETH_ETHTYPE_OFFSET          12
11 #define ICE_ETH_VLAN_TCI_OFFSET         14
12 #define ICE_MAX_VLAN_ID                 0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID         0x002F
14 #define ICE_PPP_IPV6_PROTO_ID           0x0057
15 #define ICE_IPV6_ETHER_ID               0x86DD
16 #define ICE_TCP_PROTO_ID                0x06
17
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19  * struct to configure any switch filter rules.
20  * {DA (6 bytes), SA(6 bytes),
21  * Ether type (2 bytes for header without VLAN tag) OR
22  * VLAN tag (4 bytes for header with VLAN tag) }
23  *
24  * Word on Hardcoded values
25  * byte 0 = 0x2: to identify it as locally administered DA MAC
26  * byte 6 = 0x2: to identify it as locally administered SA MAC
27  * byte 12 = 0x81 & byte 13 = 0x00:
28  *      In case of VLAN filter first two bytes defines ether type (0x8100)
29  *      and remaining two bytes are placeholder for programming a given VLAN ID
30  *      In case of Ether type filter it is treated as header without VLAN tag
31  *      and byte 12 and 13 is used to program a given Ether type instead
32  */
/* See the layout notes above: byte 0/6 = 0x2 mark locally administered
 * DA/SA MACs; bytes 12-13 (0x8100) are the VLAN ether type placeholder.
 */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34                                                         0x2, 0, 0, 0, 0, 0,
35                                                         0x81, 0, 0, 0};
36
/* Describes where a given protocol header starts inside a dummy packet;
 * the offset tables below are terminated by an ICE_PROTOCOL_LAST entry.
 */
37 struct ice_dummy_pkt_offsets {
38         enum ice_protocol_type type;
39         u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
40 };
41
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
43         { ICE_MAC_OFOS,         0 },
44         { ICE_ETYPE_OL,         12 },
45         { ICE_IPV4_OFOS,        14 },
46         { ICE_NVGRE,            34 },
47         { ICE_MAC_IL,           42 },
48         { ICE_IPV4_IL,          56 },
49         { ICE_TCP_IL,           76 },
50         { ICE_PROTOCOL_LAST,    0 },
51 };
52
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
53 static const u8 dummy_gre_tcp_packet[] = {
54         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
55         0x00, 0x00, 0x00, 0x00,
56         0x00, 0x00, 0x00, 0x00,
57
58         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
59
60         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
61         0x00, 0x00, 0x00, 0x00,
62         0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F (GRE) */
63         0x00, 0x00, 0x00, 0x00,
64         0x00, 0x00, 0x00, 0x00,
65
66         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
67         0x00, 0x00, 0x00, 0x00,
68
69         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
70         0x00, 0x00, 0x00, 0x00,
71         0x00, 0x00, 0x00, 0x00,
72         0x08, 0x00,
73
74         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
75         0x00, 0x00, 0x00, 0x00,
76         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 (TCP) */
77         0x00, 0x00, 0x00, 0x00,
78         0x00, 0x00, 0x00, 0x00,
79
80         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
81         0x00, 0x00, 0x00, 0x00,
82         0x00, 0x00, 0x00, 0x00,
83         0x50, 0x02, 0x20, 0x00,
84         0x00, 0x00, 0x00, 0x00
85 };
86
/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
88         { ICE_MAC_OFOS,         0 },
89         { ICE_ETYPE_OL,         12 },
90         { ICE_IPV4_OFOS,        14 },
91         { ICE_NVGRE,            34 },
92         { ICE_MAC_IL,           42 },
93         { ICE_IPV4_IL,          56 },
94         { ICE_UDP_ILOS,         76 },
95         { ICE_PROTOCOL_LAST,    0 },
96 };
97
/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
98 static const u8 dummy_gre_udp_packet[] = {
99         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
100         0x00, 0x00, 0x00, 0x00,
101         0x00, 0x00, 0x00, 0x00,
102
103         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
104
105         0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
106         0x00, 0x00, 0x00, 0x00,
107         0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F (GRE) */
108         0x00, 0x00, 0x00, 0x00,
109         0x00, 0x00, 0x00, 0x00,
110
111         0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
112         0x00, 0x00, 0x00, 0x00,
113
114         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
115         0x00, 0x00, 0x00, 0x00,
116         0x00, 0x00, 0x00, 0x00,
117         0x08, 0x00,
118
119         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
120         0x00, 0x00, 0x00, 0x00,
121         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
122         0x00, 0x00, 0x00, 0x00,
123         0x00, 0x00, 0x00, 0x00,
124
125         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
126         0x00, 0x08, 0x00, 0x00,
127 };
128
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + TCP dummy packet; the three tunnel types share offset 42
 */
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130         { ICE_MAC_OFOS,         0 },
131         { ICE_ETYPE_OL,         12 },
132         { ICE_IPV4_OFOS,        14 },
133         { ICE_UDP_OF,           34 },
134         { ICE_VXLAN,            42 },
135         { ICE_GENEVE,           42 },
136         { ICE_VXLAN_GPE,        42 },
137         { ICE_MAC_IL,           50 },
138         { ICE_IPV4_IL,          64 },
139         { ICE_TCP_IL,           84 },
140         { ICE_PROTOCOL_LAST,    0 },
141 };
142
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
145         0x00, 0x00, 0x00, 0x00,
146         0x00, 0x00, 0x00, 0x00,
147
148         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
149
150         0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151         0x00, 0x01, 0x00, 0x00,
152         0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol 0x11 (UDP) */
153         0x00, 0x00, 0x00, 0x00,
154         0x00, 0x00, 0x00, 0x00,
155
156         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157         0x00, 0x46, 0x00, 0x00,
158
159         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160         0x00, 0x00, 0x00, 0x00,
161
162         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163         0x00, 0x00, 0x00, 0x00,
164         0x00, 0x00, 0x00, 0x00,
165         0x08, 0x00,
166
167         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168         0x00, 0x01, 0x00, 0x00,
169         0x40, 0x06, 0x00, 0x00, /* TTL 64, protocol 0x06 (TCP) */
170         0x00, 0x00, 0x00, 0x00,
171         0x00, 0x00, 0x00, 0x00,
172
173         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174         0x00, 0x00, 0x00, 0x00,
175         0x00, 0x00, 0x00, 0x00,
176         0x50, 0x02, 0x20, 0x00,
177         0x00, 0x00, 0x00, 0x00
178 };
179
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + UDP dummy packet; the three tunnel types share offset 42
 */
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181         { ICE_MAC_OFOS,         0 },
182         { ICE_ETYPE_OL,         12 },
183         { ICE_IPV4_OFOS,        14 },
184         { ICE_UDP_OF,           34 },
185         { ICE_VXLAN,            42 },
186         { ICE_GENEVE,           42 },
187         { ICE_VXLAN_GPE,        42 },
188         { ICE_MAC_IL,           50 },
189         { ICE_IPV4_IL,          64 },
190         { ICE_UDP_ILOS,         84 },
191         { ICE_PROTOCOL_LAST,    0 },
192 };
193
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
194 static const u8 dummy_udp_tun_udp_packet[] = {
195         0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
196         0x00, 0x00, 0x00, 0x00,
197         0x00, 0x00, 0x00, 0x00,
198
199         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
200
201         0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202         0x00, 0x01, 0x00, 0x00,
203         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
204         0x00, 0x00, 0x00, 0x00,
205         0x00, 0x00, 0x00, 0x00,
206
207         0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208         0x00, 0x3a, 0x00, 0x00,
209
210         0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211         0x00, 0x00, 0x00, 0x00,
212
213         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214         0x00, 0x00, 0x00, 0x00,
215         0x00, 0x00, 0x00, 0x00,
216         0x08, 0x00,
217
218         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219         0x00, 0x01, 0x00, 0x00,
220         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
221         0x00, 0x00, 0x00, 0x00,
222         0x00, 0x00, 0x00, 0x00,
223
224         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225         0x00, 0x08, 0x00, 0x00,
226 };
227
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230         { ICE_MAC_OFOS,         0 },
231         { ICE_ETYPE_OL,         12 },
232         { ICE_IPV4_OFOS,        14 },
233         { ICE_UDP_ILOS,         34 },
234         { ICE_PROTOCOL_LAST,    0 },
235 };
236
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240         0x00, 0x00, 0x00, 0x00,
241         0x00, 0x00, 0x00, 0x00,
242
243         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
244
245         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246         0x00, 0x01, 0x00, 0x00,
247         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
248         0x00, 0x00, 0x00, 0x00,
249         0x00, 0x00, 0x00, 0x00,
250
251         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252         0x00, 0x08, 0x00, 0x00,
253
254         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
255 };
256
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259         { ICE_MAC_OFOS,         0 },
260         { ICE_ETYPE_OL,         12 },
261         { ICE_VLAN_OFOS,        14 },
262         { ICE_IPV4_OFOS,        18 },
263         { ICE_UDP_ILOS,         38 },
264         { ICE_PROTOCOL_LAST,    0 },
265 };
266
267 /* C-tag (802.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270         0x00, 0x00, 0x00, 0x00,
271         0x00, 0x00, 0x00, 0x00,
272
273         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
274
275         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276
277         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278         0x00, 0x01, 0x00, 0x00,
279         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
280         0x00, 0x00, 0x00, 0x00,
281         0x00, 0x00, 0x00, 0x00,
282
283         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284         0x00, 0x08, 0x00, 0x00,
285
286         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
287 };
288
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291         { ICE_MAC_OFOS,         0 },
292         { ICE_ETYPE_OL,         12 },
293         { ICE_IPV4_OFOS,        14 },
294         { ICE_TCP_IL,           34 },
295         { ICE_PROTOCOL_LAST,    0 },
296 };
297
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301         0x00, 0x00, 0x00, 0x00,
302         0x00, 0x00, 0x00, 0x00,
303
304         0x08, 0x00,             /* ICE_ETYPE_OL 12 */
305
306         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307         0x00, 0x01, 0x00, 0x00,
308         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 (TCP) */
309         0x00, 0x00, 0x00, 0x00,
310         0x00, 0x00, 0x00, 0x00,
311
312         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313         0x00, 0x00, 0x00, 0x00,
314         0x00, 0x00, 0x00, 0x00,
315         0x50, 0x00, 0x00, 0x00,
316         0x00, 0x00, 0x00, 0x00,
317
318         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
319 };
320
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323         { ICE_MAC_OFOS,         0 },
324         { ICE_ETYPE_OL,         12 },
325         { ICE_VLAN_OFOS,        14 },
326         { ICE_IPV4_OFOS,        18 },
327         { ICE_TCP_IL,           38 },
328         { ICE_PROTOCOL_LAST,    0 },
329 };
330
331 /* C-tag (802.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334         0x00, 0x00, 0x00, 0x00,
335         0x00, 0x00, 0x00, 0x00,
336
337         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
338
339         0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340
341         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342         0x00, 0x01, 0x00, 0x00,
343         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 (TCP) */
344         0x00, 0x00, 0x00, 0x00,
345         0x00, 0x00, 0x00, 0x00,
346
347         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348         0x00, 0x00, 0x00, 0x00,
349         0x00, 0x00, 0x00, 0x00,
350         0x50, 0x00, 0x00, 0x00,
351         0x00, 0x00, 0x00, 0x00,
352
353         0x00, 0x00,     /* 2 bytes for 4 byte alignment */
354 };
355
/* offset info for MAC + IPv6 + TCP dummy packet */
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357         { ICE_MAC_OFOS,         0 },
358         { ICE_ETYPE_OL,         12 },
359         { ICE_IPV6_OFOS,        14 },
360         { ICE_TCP_IL,           54 },
361         { ICE_PROTOCOL_LAST,    0 },
362 };
363
/* Dummy packet for MAC + IPv6 + TCP */
364 static const u8 dummy_tcp_ipv6_packet[] = {
365         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366         0x00, 0x00, 0x00, 0x00,
367         0x00, 0x00, 0x00, 0x00,
368
369         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
370
371         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
372         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373         0x00, 0x00, 0x00, 0x00,
374         0x00, 0x00, 0x00, 0x00,
375         0x00, 0x00, 0x00, 0x00,
376         0x00, 0x00, 0x00, 0x00,
377         0x00, 0x00, 0x00, 0x00,
378         0x00, 0x00, 0x00, 0x00,
379         0x00, 0x00, 0x00, 0x00,
380         0x00, 0x00, 0x00, 0x00,
381
382         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383         0x00, 0x00, 0x00, 0x00,
384         0x00, 0x00, 0x00, 0x00,
385         0x50, 0x00, 0x00, 0x00,
386         0x00, 0x00, 0x00, 0x00,
387
388         0x00, 0x00, /* 2 bytes for 4 byte alignment */
389 };
390
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394         { ICE_MAC_OFOS,         0 },
395         { ICE_ETYPE_OL,         12 },
396         { ICE_VLAN_OFOS,        14 },
397         { ICE_IPV6_OFOS,        18 },
398         { ICE_TCP_IL,           58 },
399         { ICE_PROTOCOL_LAST,    0 },
400 };
401
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405         0x00, 0x00, 0x00, 0x00,
406         0x00, 0x00, 0x00, 0x00,
407
408         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
409
410         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411
412         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414         0x00, 0x00, 0x00, 0x00,
415         0x00, 0x00, 0x00, 0x00,
416         0x00, 0x00, 0x00, 0x00,
417         0x00, 0x00, 0x00, 0x00,
418         0x00, 0x00, 0x00, 0x00,
419         0x00, 0x00, 0x00, 0x00,
420         0x00, 0x00, 0x00, 0x00,
421         0x00, 0x00, 0x00, 0x00,
422
423         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424         0x00, 0x00, 0x00, 0x00,
425         0x00, 0x00, 0x00, 0x00,
426         0x50, 0x00, 0x00, 0x00,
427         0x00, 0x00, 0x00, 0x00,
428
429         0x00, 0x00, /* 2 bytes for 4 byte alignment */
430 };
431
432 /* IPv6 + UDP */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434         { ICE_MAC_OFOS,         0 },
435         { ICE_ETYPE_OL,         12 },
436         { ICE_IPV6_OFOS,        14 },
437         { ICE_UDP_ILOS,         54 },
438         { ICE_PROTOCOL_LAST,    0 },
439 };
440
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444         0x00, 0x00, 0x00, 0x00,
445         0x00, 0x00, 0x00, 0x00,
446
447         0x86, 0xDD,             /* ICE_ETYPE_OL 12 */
448
449         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
450         0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451         0x00, 0x00, 0x00, 0x00,
452         0x00, 0x00, 0x00, 0x00,
453         0x00, 0x00, 0x00, 0x00,
454         0x00, 0x00, 0x00, 0x00,
455         0x00, 0x00, 0x00, 0x00,
456         0x00, 0x00, 0x00, 0x00,
457         0x00, 0x00, 0x00, 0x00,
458         0x00, 0x00, 0x00, 0x00,
459
460         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461         0x00, 0x10, 0x00, 0x00,
462
463         0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464         0x00, 0x00, 0x00, 0x00,
465
466         0x00, 0x00, /* 2 bytes for 4 byte alignment */
467 };
468
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
472         { ICE_MAC_OFOS,         0 },
473         { ICE_ETYPE_OL,         12 },
474         { ICE_VLAN_OFOS,        14 },
475         { ICE_IPV6_OFOS,        18 },
476         { ICE_UDP_ILOS,         58 },
477         { ICE_PROTOCOL_LAST,    0 },
478 };
479
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483         0x00, 0x00, 0x00, 0x00,
484         0x00, 0x00, 0x00, 0x00,
485
486         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
487
488         0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489
490         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491         0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492         0x00, 0x00, 0x00, 0x00,
493         0x00, 0x00, 0x00, 0x00,
494         0x00, 0x00, 0x00, 0x00,
495         0x00, 0x00, 0x00, 0x00,
496         0x00, 0x00, 0x00, 0x00,
497         0x00, 0x00, 0x00, 0x00,
498         0x00, 0x00, 0x00, 0x00,
499         0x00, 0x00, 0x00, 0x00,
500
501         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502         0x00, 0x08, 0x00, 0x00,
503
504         0x00, 0x00, /* 2 bytes for 4 byte alignment */
505 };
506
/* offset info for MAC + IPv4 + UDP + GTP dummy packet */
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508         { ICE_MAC_OFOS,         0 },
509         { ICE_IPV4_OFOS,        14 },
510         { ICE_UDP_OF,           34 },
511         { ICE_GTP,              42 },
512         { ICE_PROTOCOL_LAST,    0 },
513 };
514
/* Dummy packet for MAC + IPv4 + UDP + GTP (with PDU session container) */
515 static const u8 dummy_udp_gtp_packet[] = {
516         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517         0x00, 0x00, 0x00, 0x00,
518         0x00, 0x00, 0x00, 0x00,
519         0x08, 0x00,
520
521         0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522         0x00, 0x00, 0x00, 0x00,
523         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
524         0x00, 0x00, 0x00, 0x00,
525         0x00, 0x00, 0x00, 0x00,
526
527         0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528         0x00, 0x1c, 0x00, 0x00,
529
530         0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531         0x00, 0x00, 0x00, 0x00,
532         0x00, 0x00, 0x00, 0x85,
533
534         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535         0x00, 0x00, 0x00, 0x00,
536 };
537
/* offset info for MAC + IPv4 + UDP + GTP-U + inner IPv4 dummy packet */
538 static const
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
540         { ICE_MAC_OFOS,         0 },
541         { ICE_IPV4_OFOS,        14 },
542         { ICE_UDP_OF,           34 },
543         { ICE_GTP,              42 },
544         { ICE_IPV4_IL,          62 },
545         { ICE_PROTOCOL_LAST,    0 },
546 };
547
/* Dummy packet for MAC + IPv4 + UDP + GTP-U + inner IPv4 */
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550         0x00, 0x00, 0x00, 0x00,
551         0x00, 0x00, 0x00, 0x00,
552         0x08, 0x00,
553
554         0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555         0x00, 0x00, 0x40, 0x00,
556         0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol 0x11 (UDP) */
557         0x00, 0x00, 0x00, 0x00,
558         0x00, 0x00, 0x00, 0x00,
559
560         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561         0x00, 0x00, 0x00, 0x00,
562
563         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
564         0x00, 0x00, 0x00, 0x00,
565         0x00, 0x00, 0x00, 0x85,
566
567         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568         0x00, 0x00, 0x00, 0x00,
569
570         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571         0x00, 0x00, 0x40, 0x00,
572         0x40, 0x00, 0x00, 0x00,
573         0x00, 0x00, 0x00, 0x00,
574         0x00, 0x00, 0x00, 0x00,
575         0x00, 0x00,
576 };
577
/* offset info for MAC + IPv4 + UDP + GTP-U + inner IPv6 dummy packet */
578 static const
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
580         { ICE_MAC_OFOS,         0 },
581         { ICE_IPV4_OFOS,        14 },
582         { ICE_UDP_OF,           34 },
583         { ICE_GTP,              42 },
584         { ICE_IPV6_IL,          62 },
585         { ICE_PROTOCOL_LAST,    0 },
586 };
587
/* Dummy packet for MAC + IPv4 + UDP + GTP-U + inner IPv6 */
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590         0x00, 0x00, 0x00, 0x00,
591         0x00, 0x00, 0x00, 0x00,
592         0x08, 0x00,
593
594         0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595         0x00, 0x00, 0x40, 0x00,
596         0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol 0x11 (UDP) */
597         0x00, 0x00, 0x00, 0x00,
598         0x00, 0x00, 0x00, 0x00,
599
600         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601         0x00, 0x00, 0x00, 0x00,
602
603         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
604         0x00, 0x00, 0x00, 0x00,
605         0x00, 0x00, 0x00, 0x85,
606
607         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608         0x00, 0x00, 0x00, 0x00,
609
610         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611         0x00, 0x00, 0x3b, 0x00, /* Next header 0x3b (no next header) */
612         0x00, 0x00, 0x00, 0x00,
613         0x00, 0x00, 0x00, 0x00,
614         0x00, 0x00, 0x00, 0x00,
615         0x00, 0x00, 0x00, 0x00,
616         0x00, 0x00, 0x00, 0x00,
617         0x00, 0x00, 0x00, 0x00,
618         0x00, 0x00, 0x00, 0x00,
619         0x00, 0x00, 0x00, 0x00,
620
621         0x00, 0x00,
622 };
623
/* offset info for MAC + IPv6 + UDP + GTP-U + inner IPv4 dummy packet */
624 static const
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
626         { ICE_MAC_OFOS,         0 },
627         { ICE_IPV6_OFOS,        14 },
628         { ICE_UDP_OF,           54 },
629         { ICE_GTP,              62 },
630         { ICE_IPV4_IL,          82 },
631         { ICE_PROTOCOL_LAST,    0 },
632 };
633
/* Dummy packet for MAC + IPv6 + UDP + GTP-U + inner IPv4 */
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636         0x00, 0x00, 0x00, 0x00,
637         0x00, 0x00, 0x00, 0x00,
638         0x86, 0xdd,
639
640         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641         0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642         0x00, 0x00, 0x00, 0x00,
643         0x00, 0x00, 0x00, 0x00,
644         0x00, 0x00, 0x00, 0x00,
645         0x00, 0x00, 0x00, 0x00,
646         0x00, 0x00, 0x00, 0x00,
647         0x00, 0x00, 0x00, 0x00,
648         0x00, 0x00, 0x00, 0x00,
649         0x00, 0x00, 0x00, 0x00,
650
651         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652         0x00, 0x00, 0x00, 0x00,
653
654         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
655         0x00, 0x00, 0x00, 0x00,
656         0x00, 0x00, 0x00, 0x85,
657
658         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659         0x00, 0x00, 0x00, 0x00,
660
661         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662         0x00, 0x00, 0x40, 0x00,
663         0x40, 0x00, 0x00, 0x00,
664         0x00, 0x00, 0x00, 0x00,
665         0x00, 0x00, 0x00, 0x00,
666
667         0x00, 0x00,
668 };
669
/* offset info for MAC + IPv6 + UDP + GTP-U + inner IPv6 dummy packet */
670 static const
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
672         { ICE_MAC_OFOS,         0 },
673         { ICE_IPV6_OFOS,        14 },
674         { ICE_UDP_OF,           54 },
675         { ICE_GTP,              62 },
676         { ICE_IPV6_IL,          82 },
677         { ICE_PROTOCOL_LAST,    0 },
678 };
679
/* Dummy packet for MAC + IPv6 + UDP + GTP-U + inner IPv6 */
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682         0x00, 0x00, 0x00, 0x00,
683         0x00, 0x00, 0x00, 0x00,
684         0x86, 0xdd,
685
686         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687         0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688         0x00, 0x00, 0x00, 0x00,
689         0x00, 0x00, 0x00, 0x00,
690         0x00, 0x00, 0x00, 0x00,
691         0x00, 0x00, 0x00, 0x00,
692         0x00, 0x00, 0x00, 0x00,
693         0x00, 0x00, 0x00, 0x00,
694         0x00, 0x00, 0x00, 0x00,
695         0x00, 0x00, 0x00, 0x00,
696
697         0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698         0x00, 0x00, 0x00, 0x00,
699
700         0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
701         0x00, 0x00, 0x00, 0x00,
702         0x00, 0x00, 0x00, 0x85,
703
704         0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705         0x00, 0x00, 0x00, 0x00,
706
707         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 82 */
708         0x00, 0x00, 0x3b, 0x00, /* Next header 0x3b (no next header) */
709         0x00, 0x00, 0x00, 0x00,
710         0x00, 0x00, 0x00, 0x00,
711         0x00, 0x00, 0x00, 0x00,
712         0x00, 0x00, 0x00, 0x00,
713         0x00, 0x00, 0x00, 0x00,
714         0x00, 0x00, 0x00, 0x00,
715         0x00, 0x00, 0x00, 0x00,
716         0x00, 0x00, 0x00, 0x00,
717
718         0x00, 0x00,
719 };
720
/* offset info for MAC + IPv4 + UDP + GTP (no payload) dummy packet */
721 static const
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
723         { ICE_MAC_OFOS,         0 },
724         { ICE_IPV4_OFOS,        14 },
725         { ICE_UDP_OF,           34 },
726         { ICE_GTP_NO_PAY,       42 },
727         { ICE_PROTOCOL_LAST,    0 },
728 };
729
/* offset info for MAC + IPv6 + UDP + GTP (no payload) dummy packet */
730 static const
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
732         { ICE_MAC_OFOS,         0 },
733         { ICE_IPV6_OFOS,        14 },
734         { ICE_UDP_OF,           54 },
735         { ICE_GTP_NO_PAY,       62 },
736         { ICE_PROTOCOL_LAST,    0 },
737 };
738
/* offset info for MAC + VLAN + PPPoE dummy packet */
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
740         { ICE_MAC_OFOS,         0 },
741         { ICE_ETYPE_OL,         12 },
742         { ICE_VLAN_OFOS,        14},
743         { ICE_PPPOE,            18 },
744         { ICE_PROTOCOL_LAST,    0 },
745 };
746
/* offset info for MAC + VLAN + PPPoE + IPv4 dummy packet */
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
748         { ICE_MAC_OFOS,         0 },
749         { ICE_ETYPE_OL,         12 },
750         { ICE_VLAN_OFOS,        14},
751         { ICE_PPPOE,            18 },
752         { ICE_IPV4_OFOS,        26 },
753         { ICE_PROTOCOL_LAST,    0 },
754 };
755
/* Dummy packet for MAC + VLAN + PPPoE + IPv4 (no L4 header) */
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758         0x00, 0x00, 0x00, 0x00,
759         0x00, 0x00, 0x00, 0x00,
760
761         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
762
763         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
764
765         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
766         0x00, 0x16,
767
768         0x00, 0x21,             /* PPP Link Layer 24 */
769
770         0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 26 */
771         0x00, 0x00, 0x00, 0x00,
772         0x00, 0x00, 0x00, 0x00,
773         0x00, 0x00, 0x00, 0x00,
774         0x00, 0x00, 0x00, 0x00,
775
776         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
777 };
778
/* offset info for MAC + VLAN + PPPoE + IPv4 + TCP dummy packet */
779 static const
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
781         { ICE_MAC_OFOS,         0 },
782         { ICE_ETYPE_OL,         12 },
783         { ICE_VLAN_OFOS,        14},
784         { ICE_PPPOE,            18 },
785         { ICE_IPV4_OFOS,        26 },
786         { ICE_TCP_IL,           46 },
787         { ICE_PROTOCOL_LAST,    0 },
788 };
789
/* Dummy packet for MAC + VLAN + PPPoE + IPv4 + TCP */
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792         0x00, 0x00, 0x00, 0x00,
793         0x00, 0x00, 0x00, 0x00,
794
795         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
796
797         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
798
799         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
800         0x00, 0x16,
801
802         0x00, 0x21,             /* PPP Link Layer 24 */
803
804         0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805         0x00, 0x01, 0x00, 0x00,
806         0x00, 0x06, 0x00, 0x00, /* protocol 0x06 (TCP) */
807         0x00, 0x00, 0x00, 0x00,
808         0x00, 0x00, 0x00, 0x00,
809
810         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811         0x00, 0x00, 0x00, 0x00,
812         0x00, 0x00, 0x00, 0x00,
813         0x50, 0x00, 0x00, 0x00,
814         0x00, 0x00, 0x00, 0x00,
815
816         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
817 };
818
/* offset info for MAC + VLAN + PPPoE + IPv4 + UDP dummy packet */
819 static const
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
821         { ICE_MAC_OFOS,         0 },
822         { ICE_ETYPE_OL,         12 },
823         { ICE_VLAN_OFOS,        14},
824         { ICE_PPPOE,            18 },
825         { ICE_IPV4_OFOS,        26 },
826         { ICE_UDP_ILOS,         46 },
827         { ICE_PROTOCOL_LAST,    0 },
828 };
829
/* Dummy packet for MAC + VLAN + PPPoE + IPv4 + UDP */
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832         0x00, 0x00, 0x00, 0x00,
833         0x00, 0x00, 0x00, 0x00,
834
835         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
836
837         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
838
839         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
840         0x00, 0x16,
841
842         0x00, 0x21,             /* PPP Link Layer 24 */
843
844         0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845         0x00, 0x01, 0x00, 0x00,
846         0x00, 0x11, 0x00, 0x00, /* protocol 0x11 (UDP) */
847         0x00, 0x00, 0x00, 0x00,
848         0x00, 0x00, 0x00, 0x00,
849
850         0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851         0x00, 0x08, 0x00, 0x00,
852
853         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
854 };
855
/* offset info for MAC + VLAN + PPPoE + IPv6 dummy packet */
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
857         { ICE_MAC_OFOS,         0 },
858         { ICE_ETYPE_OL,         12 },
859         { ICE_VLAN_OFOS,        14},
860         { ICE_PPPOE,            18 },
861         { ICE_IPV6_OFOS,        26 },
862         { ICE_PROTOCOL_LAST,    0 },
863 };
864
/* Dummy packet for MAC + VLAN + PPPoE + IPv6 (no L4 header) */
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867         0x00, 0x00, 0x00, 0x00,
868         0x00, 0x00, 0x00, 0x00,
869
870         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
871
872         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
873
874         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
875         0x00, 0x2a,
876
877         0x00, 0x57,             /* PPP Link Layer 24 */
878
879         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880         0x00, 0x00, 0x3b, 0x00, /* Next header 0x3b (no next header) */
881         0x00, 0x00, 0x00, 0x00,
882         0x00, 0x00, 0x00, 0x00,
883         0x00, 0x00, 0x00, 0x00,
884         0x00, 0x00, 0x00, 0x00,
885         0x00, 0x00, 0x00, 0x00,
886         0x00, 0x00, 0x00, 0x00,
887         0x00, 0x00, 0x00, 0x00,
888         0x00, 0x00, 0x00, 0x00,
889
890         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
891 };
892
/* offset info for MAC + VLAN + PPPoE + IPv6 + TCP dummy packet */
893 static const
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
895         { ICE_MAC_OFOS,         0 },
896         { ICE_ETYPE_OL,         12 },
897         { ICE_VLAN_OFOS,        14},
898         { ICE_PPPOE,            18 },
899         { ICE_IPV6_OFOS,        26 },
900         { ICE_TCP_IL,           66 },
901         { ICE_PROTOCOL_LAST,    0 },
902 };
903
/* Dummy packet for MAC + VLAN + PPPoE + IPv6 + TCP */
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905         0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906         0x00, 0x00, 0x00, 0x00,
907         0x00, 0x00, 0x00, 0x00,
908
909         0x81, 0x00,             /* ICE_ETYPE_OL 12 */
910
911         0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
912
913         0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
914         0x00, 0x2a,
915
916         0x00, 0x57,             /* PPP Link Layer 24 */
917
918         0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919         0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920         0x00, 0x00, 0x00, 0x00,
921         0x00, 0x00, 0x00, 0x00,
922         0x00, 0x00, 0x00, 0x00,
923         0x00, 0x00, 0x00, 0x00,
924         0x00, 0x00, 0x00, 0x00,
925         0x00, 0x00, 0x00, 0x00,
926         0x00, 0x00, 0x00, 0x00,
927         0x00, 0x00, 0x00, 0x00,
928
929         0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930         0x00, 0x00, 0x00, 0x00,
931         0x00, 0x00, 0x00, 0x00,
932         0x50, 0x00, 0x00, 0x00,
933         0x00, 0x00, 0x00, 0x00,
934
935         0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
936 };
937
/* Offsets of each protocol header within dummy_pppoe_ipv6_udp_packet */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_VLAN_OFOS,	14},
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy PPPoE session + IPv6 + UDP training packet for PPPoE IPv6 UDP
 * switch filter rules.
 */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00,             /* ICE_ETYPE_OL 12 */

	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,

	0x00, 0x57,             /* PPP Link Layer 24 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00, /* UDP length = 8 (header only) */

	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
979
/* Offsets of each protocol header within dummy_ipv4_esp_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 + ESP training packet for IPv4 ESP switch filter rules */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,             /* Ethertype = IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* IP protocol 0x32 = ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1003
/* Offsets of each protocol header within dummy_ipv6_esp_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 + ESP training packet for IPv6 ESP switch filter rules */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,             /* Ethertype = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1032
/* Offsets of each protocol header within dummy_ipv4_ah_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 + AH training packet for IPv4 AH switch filter rules */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,             /* Ethertype = IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* IP protocol 0x33 = AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1057
/* Offsets of each protocol header within dummy_ipv6_ah_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 + AH training packet for IPv6 AH switch filter rules */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,             /* Ethertype = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1087
/* Offsets of each protocol header within dummy_ipv4_nat_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 + UDP-encapsulated ESP (NAT traversal, UDP dport 4500)
 * training packet for IPv4 NAT-T switch filter rules.
 */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,             /* Ethertype = IPv4 */

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* IP protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1115
/* Offsets of each protocol header within dummy_ipv6_nat_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 + UDP-encapsulated ESP (NAT traversal, UDP dport 4500)
 * training packet for IPv6 NAT-T switch filter rules.
 */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,             /* Ethertype = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */

};
1149
/* Offsets of each protocol header within dummy_ipv4_l2tpv3_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv4 + L2TPv3 training packet for IPv4 L2TPv3 switch filter rules */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,             /* Ethertype = IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* IP protocol 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1174
/* Offsets of each protocol header within dummy_ipv6_l2tpv3_pkt */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy IPv6 + L2TPv3 training packet for IPv6 L2TPv3 switch filter rules */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,             /* Ethertype = IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
	0x00, 0x0c, 0x73, 0x40, /* Next header 0x73 = L2TPv3 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,             /* 2 bytes for 4 bytes alignment */
};
1204
/* this is a recipe to profile association bitmap: bit j set in
 * recipe_to_profile[rid] means recipe rid is associated with profile j
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap: bit j set in
 * profile_to_recipe[prof] means profile prof is associated with recipe j
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; populates both bitmaps above from FW */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1214
1215 /**
1216  * ice_collect_result_idx - copy result index values
1217  * @buf: buffer that contains the result index
1218  * @recp: the recipe struct to copy data into
1219  */
1220 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1221                                    struct ice_sw_recipe *recp)
1222 {
1223         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1224                 ice_set_bit(buf->content.result_indx &
1225                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1226 }
1227
1228 /**
1229  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1230  * @rid: recipe ID that we are populating
1231  */
1232 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid)
1233 {
1234         u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1235         u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1236         u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1237         u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1238         enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
1239         u16 i, j, profile_num = 0;
1240         bool non_tun_valid = false;
1241         bool pppoe_valid = false;
1242         bool vxlan_valid = false;
1243         bool gre_valid = false;
1244         bool gtp_valid = false;
1245         bool flag_valid = false;
1246
1247         for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1248                 if (!ice_is_bit_set(recipe_to_profile[rid], j))
1249                         continue;
1250                 else
1251                         profile_num++;
1252
1253                 for (i = 0; i < 12; i++) {
1254                         if (gre_profile[i] == j)
1255                                 gre_valid = true;
1256                 }
1257
1258                 for (i = 0; i < 12; i++) {
1259                         if (vxlan_profile[i] == j)
1260                                 vxlan_valid = true;
1261                 }
1262
1263                 for (i = 0; i < 7; i++) {
1264                         if (pppoe_profile[i] == j)
1265                                 pppoe_valid = true;
1266                 }
1267
1268                 for (i = 0; i < 6; i++) {
1269                         if (non_tun_profile[i] == j)
1270                                 non_tun_valid = true;
1271                 }
1272
1273                 if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1274                     j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1275                         gtp_valid = true;
1276
1277                 if ((j >= ICE_PROFID_IPV4_ESP &&
1278                      j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1279                     (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1280                      j <= ICE_PROFID_IPV6_GTPU_TEID))
1281                         flag_valid = true;
1282         }
1283
1284         if (!non_tun_valid && vxlan_valid)
1285                 tun_type = ICE_SW_TUN_VXLAN;
1286         else if (!non_tun_valid && gre_valid)
1287                 tun_type = ICE_SW_TUN_NVGRE;
1288         else if (!non_tun_valid && pppoe_valid)
1289                 tun_type = ICE_SW_TUN_PPPOE;
1290         else if (!non_tun_valid && gtp_valid)
1291                 tun_type = ICE_SW_TUN_GTP;
1292         else if (non_tun_valid &&
1293                  (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1294                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1295         else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1296                  !pppoe_valid)
1297                 tun_type = ICE_NON_TUN;
1298         else
1299                 tun_type = ICE_NON_TUN;
1300
1301         if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1302                 i = ice_is_bit_set(recipe_to_profile[rid],
1303                                    ICE_PROFID_PPPOE_IPV4_OTHER);
1304                 j = ice_is_bit_set(recipe_to_profile[rid],
1305                                    ICE_PROFID_PPPOE_IPV6_OTHER);
1306                 if (i && !j)
1307                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1308                 else if (!i && j)
1309                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1310         }
1311
1312         if (tun_type == ICE_SW_TUN_GTP) {
1313                 if (ice_is_bit_set(recipe_to_profile[rid],
1314                                    ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1315                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1316                 else if (ice_is_bit_set(recipe_to_profile[rid],
1317                                         ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1318                         tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1319                 else if (ice_is_bit_set(recipe_to_profile[rid],
1320                                         ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1321                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1322                 else if (ice_is_bit_set(recipe_to_profile[rid],
1323                                         ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1324                         tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1325         }
1326
1327         if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1328                 for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1329                         if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1330                                 switch (j) {
1331                                 case ICE_PROFID_IPV4_TCP:
1332                                         tun_type = ICE_SW_IPV4_TCP;
1333                                         break;
1334                                 case ICE_PROFID_IPV4_UDP:
1335                                         tun_type = ICE_SW_IPV4_UDP;
1336                                         break;
1337                                 case ICE_PROFID_IPV6_TCP:
1338                                         tun_type = ICE_SW_IPV6_TCP;
1339                                         break;
1340                                 case ICE_PROFID_IPV6_UDP:
1341                                         tun_type = ICE_SW_IPV6_UDP;
1342                                         break;
1343                                 case ICE_PROFID_PPPOE_PAY:
1344                                         tun_type = ICE_SW_TUN_PPPOE_PAY;
1345                                         break;
1346                                 case ICE_PROFID_PPPOE_IPV4_TCP:
1347                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1348                                         break;
1349                                 case ICE_PROFID_PPPOE_IPV4_UDP:
1350                                         tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1351                                         break;
1352                                 case ICE_PROFID_PPPOE_IPV4_OTHER:
1353                                         tun_type = ICE_SW_TUN_PPPOE_IPV4;
1354                                         break;
1355                                 case ICE_PROFID_PPPOE_IPV6_TCP:
1356                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1357                                         break;
1358                                 case ICE_PROFID_PPPOE_IPV6_UDP:
1359                                         tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1360                                         break;
1361                                 case ICE_PROFID_PPPOE_IPV6_OTHER:
1362                                         tun_type = ICE_SW_TUN_PPPOE_IPV6;
1363                                         break;
1364                                 case ICE_PROFID_IPV4_ESP:
1365                                         tun_type = ICE_SW_TUN_IPV4_ESP;
1366                                         break;
1367                                 case ICE_PROFID_IPV6_ESP:
1368                                         tun_type = ICE_SW_TUN_IPV6_ESP;
1369                                         break;
1370                                 case ICE_PROFID_IPV4_AH:
1371                                         tun_type = ICE_SW_TUN_IPV4_AH;
1372                                         break;
1373                                 case ICE_PROFID_IPV6_AH:
1374                                         tun_type = ICE_SW_TUN_IPV6_AH;
1375                                         break;
1376                                 case ICE_PROFID_IPV4_NAT_T:
1377                                         tun_type = ICE_SW_TUN_IPV4_NAT_T;
1378                                         break;
1379                                 case ICE_PROFID_IPV6_NAT_T:
1380                                         tun_type = ICE_SW_TUN_IPV6_NAT_T;
1381                                         break;
1382                                 case ICE_PROFID_IPV4_PFCP_NODE:
1383                                         tun_type =
1384                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1385                                         break;
1386                                 case ICE_PROFID_IPV6_PFCP_NODE:
1387                                         tun_type =
1388                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1389                                         break;
1390                                 case ICE_PROFID_IPV4_PFCP_SESSION:
1391                                         tun_type =
1392                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1393                                         break;
1394                                 case ICE_PROFID_IPV6_PFCP_SESSION:
1395                                         tun_type =
1396                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1397                                         break;
1398                                 case ICE_PROFID_MAC_IPV4_L2TPV3:
1399                                         tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1400                                         break;
1401                                 case ICE_PROFID_MAC_IPV6_L2TPV3:
1402                                         tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1403                                         break;
1404                                 case ICE_PROFID_IPV4_GTPU_TEID:
1405                                         tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1406                                         break;
1407                                 case ICE_PROFID_IPV6_GTPU_TEID:
1408                                         tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1409                                         break;
1410                                 default:
1411                                         break;
1412                                 }
1413
1414                                 return tun_type;
1415                         }
1416                 }
1417         }
1418
1419         return tun_type;
1420 }
1421
1422 /**
1423  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1424  * @hw: pointer to hardware structure
1425  * @recps: struct that we need to populate
1426  * @rid: recipe ID that we are populating
1427  * @refresh_required: true if we should get recipe to profile mapping from FW
1428  *
1429  * This function is used to populate all the necessary entries into our
1430  * bookkeeping so that we have a current list of all the recipes that are
1431  * programmed in the firmware.
1432  */
1433 static enum ice_status
1434 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1435                     bool *refresh_required)
1436 {
1437         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1438         struct ice_aqc_recipe_data_elem *tmp;
1439         u16 num_recps = ICE_MAX_NUM_RECIPES;
1440         struct ice_prot_lkup_ext *lkup_exts;
1441         enum ice_status status;
1442         u8 fv_word_idx = 0;
1443         u16 sub_recps;
1444
1445         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1446
1447         /* we need a buffer big enough to accommodate all the recipes */
1448         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1449                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1450         if (!tmp)
1451                 return ICE_ERR_NO_MEMORY;
1452
1453         tmp[0].recipe_indx = rid;
1454         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1455         /* non-zero status meaning recipe doesn't exist */
1456         if (status)
1457                 goto err_unroll;
1458
1459         /* Get recipe to profile map so that we can get the fv from lkups that
1460          * we read for a recipe from FW. Since we want to minimize the number of
1461          * times we make this FW call, just make one call and cache the copy
1462          * until a new recipe is added. This operation is only required the
1463          * first time to get the changes from FW. Then to search existing
1464          * entries we don't need to update the cache again until another recipe
1465          * gets added.
1466          */
1467         if (*refresh_required) {
1468                 ice_get_recp_to_prof_map(hw);
1469                 *refresh_required = false;
1470         }
1471
1472         /* Start populating all the entries for recps[rid] based on lkups from
1473          * firmware. Note that we are only creating the root recipe in our
1474          * database.
1475          */
1476         lkup_exts = &recps[rid].lkup_exts;
1477
1478         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1479                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1480                 struct ice_recp_grp_entry *rg_entry;
1481                 u8 i, prof, idx, prot = 0;
1482                 bool is_root;
1483                 u16 off = 0;
1484
1485                 rg_entry = (struct ice_recp_grp_entry *)
1486                         ice_malloc(hw, sizeof(*rg_entry));
1487                 if (!rg_entry) {
1488                         status = ICE_ERR_NO_MEMORY;
1489                         goto err_unroll;
1490                 }
1491
1492                 idx = root_bufs.recipe_indx;
1493                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1494
1495                 /* Mark all result indices in this chain */
1496                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1497                         ice_set_bit(root_bufs.content.result_indx &
1498                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1499
1500                 /* get the first profile that is associated with rid */
1501                 prof = ice_find_first_bit(recipe_to_profile[idx],
1502                                           ICE_MAX_NUM_PROFILES);
1503                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1504                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1505
1506                         rg_entry->fv_idx[i] = lkup_indx;
1507                         rg_entry->fv_mask[i] =
1508                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1509
1510                         /* If the recipe is a chained recipe then all its
1511                          * child recipe's result will have a result index.
1512                          * To fill fv_words we should not use those result
1513                          * index, we only need the protocol ids and offsets.
1514                          * We will skip all the fv_idx which stores result
1515                          * index in them. We also need to skip any fv_idx which
1516                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1517                          * valid offset value.
1518                          */
1519                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1520                                            rg_entry->fv_idx[i]) ||
1521                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1522                             rg_entry->fv_idx[i] == 0)
1523                                 continue;
1524
1525                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
1526                                           rg_entry->fv_idx[i], &prot, &off);
1527                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1528                         lkup_exts->fv_words[fv_word_idx].off = off;
1529                         lkup_exts->field_mask[fv_word_idx] =
1530                                 rg_entry->fv_mask[i];
1531                         fv_word_idx++;
1532                 }
1533                 /* populate rg_list with the data from the child entry of this
1534                  * recipe
1535                  */
1536                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1537
1538                 /* Propagate some data to the recipe database */
1539                 recps[idx].is_root = !!is_root;
1540                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1541                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1542                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1543                         recps[idx].chain_idx = root_bufs.content.result_indx &
1544                                 ~ICE_AQ_RECIPE_RESULT_EN;
1545                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1546                 } else {
1547                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1548                 }
1549
1550                 if (!is_root)
1551                         continue;
1552
1553                 /* Only do the following for root recipes entries */
1554                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1555                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1556                 recps[idx].root_rid = root_bufs.content.rid &
1557                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
1558                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1559         }
1560
1561         /* Complete initialization of the root recipe entry */
1562         lkup_exts->n_val_words = fv_word_idx;
1563         recps[rid].big_recp = (num_recps > 1);
1564         recps[rid].n_grp_count = (u8)num_recps;
1565         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid);
1566         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1567                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
1568                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1569         if (!recps[rid].root_buf)
1570                 goto err_unroll;
1571
1572         /* Copy result indexes */
1573         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1574         recps[rid].recp_created = true;
1575
1576 err_unroll:
1577         ice_free(hw, tmp);
1578         return status;
1579 }
1580
1581 /**
1582  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1583  * @hw: pointer to hardware structure
1584  *
1585  * This function is used to populate recipe_to_profile matrix where index to
1586  * this array is the recipe ID and the element is the mapping of which profiles
1587  * is this recipe mapped to.
1588  */
1589 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1590 {
1591         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1592         u16 i;
1593
1594         for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
1595                 u16 j;
1596
1597                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1598                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1599                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1600                         continue;
1601                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1602                               ICE_MAX_NUM_RECIPES);
1603                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1604                         ice_set_bit(i, recipe_to_profile[j]);
1605         }
1606 }
1607
1608 /**
1609  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1610  * @hw: pointer to the HW struct
1611  * @recp_list: pointer to sw recipe list
1612  *
1613  * Allocate memory for the entire recipe table and initialize the structures/
1614  * entries corresponding to basic recipes.
1615  */
1616 enum ice_status
1617 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1618 {
1619         struct ice_sw_recipe *recps;
1620         u8 i;
1621
1622         recps = (struct ice_sw_recipe *)
1623                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1624         if (!recps)
1625                 return ICE_ERR_NO_MEMORY;
1626
1627         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1628                 recps[i].root_rid = i;
1629                 INIT_LIST_HEAD(&recps[i].filt_rules);
1630                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1631                 INIT_LIST_HEAD(&recps[i].rg_list);
1632                 ice_init_lock(&recps[i].filt_rule_lock);
1633         }
1634
1635         *recp_list = recps;
1636
1637         return ICE_SUCCESS;
1638 }
1639
1640 /**
1641  * ice_aq_get_sw_cfg - get switch configuration
1642  * @hw: pointer to the hardware structure
1643  * @buf: pointer to the result buffer
1644  * @buf_size: length of the buffer available for response
1645  * @req_desc: pointer to requested descriptor
1646  * @num_elems: pointer to number of elements
1647  * @cd: pointer to command details structure or NULL
1648  *
1649  * Get switch configuration (0x0200) to be placed in buf.
1650  * This admin command returns information such as initial VSI/port number
1651  * and switch ID it belongs to.
1652  *
1653  * NOTE: *req_desc is both an input/output parameter.
1654  * The caller of this function first calls this function with *request_desc set
1655  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1656  * configuration information has been returned; if non-zero (meaning not all
1657  * the information was returned), the caller should call this function again
1658  * with *req_desc set to the previous value returned by f/w to get the
1659  * next block of switch configuration information.
1660  *
1661  * *num_elems is output only parameter. This reflects the number of elements
1662  * in response buffer. The caller of this function to use *num_elems while
1663  * parsing the response buffer.
1664  */
1665 static enum ice_status
1666 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1667                   u16 buf_size, u16 *req_desc, u16 *num_elems,
1668                   struct ice_sq_cd *cd)
1669 {
1670         struct ice_aqc_get_sw_cfg *cmd;
1671         struct ice_aq_desc desc;
1672         enum ice_status status;
1673
1674         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1675         cmd = &desc.params.get_sw_conf;
1676         cmd->element = CPU_TO_LE16(*req_desc);
1677
1678         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1679         if (!status) {
1680                 *req_desc = LE16_TO_CPU(cmd->element);
1681                 *num_elems = LE16_TO_CPU(cmd->num_elems);
1682         }
1683
1684         return status;
1685 }
1686
1687 /**
1688  * ice_alloc_sw - allocate resources specific to switch
1689  * @hw: pointer to the HW struct
1690  * @ena_stats: true to turn on VEB stats
1691  * @shared_res: true for shared resource, false for dedicated resource
1692  * @sw_id: switch ID returned
1693  * @counter_id: VEB counter ID returned
1694  *
1695  * allocates switch resources (SWID and VEB counter) (0x0208)
1696  */
1697 enum ice_status
1698 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1699              u16 *counter_id)
1700 {
1701         struct ice_aqc_alloc_free_res_elem *sw_buf;
1702         struct ice_aqc_res_elem *sw_ele;
1703         enum ice_status status;
1704         u16 buf_len;
1705
1706         buf_len = ice_struct_size(sw_buf, elem, 1);
1707         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1708         if (!sw_buf)
1709                 return ICE_ERR_NO_MEMORY;
1710
1711         /* Prepare buffer for switch ID.
1712          * The number of resource entries in buffer is passed as 1 since only a
1713          * single switch/VEB instance is allocated, and hence a single sw_id
1714          * is requested.
1715          */
1716         sw_buf->num_elems = CPU_TO_LE16(1);
1717         sw_buf->res_type =
1718                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1719                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1720                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1721
1722         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1723                                        ice_aqc_opc_alloc_res, NULL);
1724
1725         if (status)
1726                 goto ice_alloc_sw_exit;
1727
1728         sw_ele = &sw_buf->elem[0];
1729         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1730
1731         if (ena_stats) {
1732                 /* Prepare buffer for VEB Counter */
1733                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1734                 struct ice_aqc_alloc_free_res_elem *counter_buf;
1735                 struct ice_aqc_res_elem *counter_ele;
1736
1737                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1738                                 ice_malloc(hw, buf_len);
1739                 if (!counter_buf) {
1740                         status = ICE_ERR_NO_MEMORY;
1741                         goto ice_alloc_sw_exit;
1742                 }
1743
1744                 /* The number of resource entries in buffer is passed as 1 since
1745                  * only a single switch/VEB instance is allocated, and hence a
1746                  * single VEB counter is requested.
1747                  */
1748                 counter_buf->num_elems = CPU_TO_LE16(1);
1749                 counter_buf->res_type =
1750                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1751                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1752                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1753                                                opc, NULL);
1754
1755                 if (status) {
1756                         ice_free(hw, counter_buf);
1757                         goto ice_alloc_sw_exit;
1758                 }
1759                 counter_ele = &counter_buf->elem[0];
1760                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1761                 ice_free(hw, counter_buf);
1762         }
1763
1764 ice_alloc_sw_exit:
1765         ice_free(hw, sw_buf);
1766         return status;
1767 }
1768
1769 /**
1770  * ice_free_sw - free resources specific to switch
1771  * @hw: pointer to the HW struct
1772  * @sw_id: switch ID returned
1773  * @counter_id: VEB counter ID returned
1774  *
1775  * free switch resources (SWID and VEB counter) (0x0209)
1776  *
1777  * NOTE: This function frees multiple resources. It continues
1778  * releasing other resources even after it encounters error.
1779  * The error code returned is the last error it encountered.
1780  */
1781 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
1782 {
1783         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
1784         enum ice_status status, ret_status;
1785         u16 buf_len;
1786
1787         buf_len = ice_struct_size(sw_buf, elem, 1);
1788         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1789         if (!sw_buf)
1790                 return ICE_ERR_NO_MEMORY;
1791
1792         /* Prepare buffer to free for switch ID res.
1793          * The number of resource entries in buffer is passed as 1 since only a
1794          * single switch/VEB instance is freed, and hence a single sw_id
1795          * is released.
1796          */
1797         sw_buf->num_elems = CPU_TO_LE16(1);
1798         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
1799         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
1800
1801         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1802                                            ice_aqc_opc_free_res, NULL);
1803
1804         if (ret_status)
1805                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1806
1807         /* Prepare buffer to free for VEB Counter resource */
1808         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1809                         ice_malloc(hw, buf_len);
1810         if (!counter_buf) {
1811                 ice_free(hw, sw_buf);
1812                 return ICE_ERR_NO_MEMORY;
1813         }
1814
1815         /* The number of resource entries in buffer is passed as 1 since only a
1816          * single switch/VEB instance is freed, and hence a single VEB counter
1817          * is released
1818          */
1819         counter_buf->num_elems = CPU_TO_LE16(1);
1820         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
1821         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
1822
1823         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1824                                        ice_aqc_opc_free_res, NULL);
1825         if (status) {
1826                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
1827                 ret_status = status;
1828         }
1829
1830         ice_free(hw, counter_buf);
1831         ice_free(hw, sw_buf);
1832         return ret_status;
1833 }
1834
1835 /**
1836  * ice_aq_add_vsi
1837  * @hw: pointer to the HW struct
1838  * @vsi_ctx: pointer to a VSI context struct
1839  * @cd: pointer to command details structure or NULL
1840  *
1841  * Add a VSI context to the hardware (0x0210)
1842  */
1843 enum ice_status
1844 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1845                struct ice_sq_cd *cd)
1846 {
1847         struct ice_aqc_add_update_free_vsi_resp *res;
1848         struct ice_aqc_add_get_update_free_vsi *cmd;
1849         struct ice_aq_desc desc;
1850         enum ice_status status;
1851
1852         cmd = &desc.params.vsi_cmd;
1853         res = &desc.params.add_update_free_vsi_res;
1854
1855         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
1856
1857         if (!vsi_ctx->alloc_from_pool)
1858                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
1859                                            ICE_AQ_VSI_IS_VALID);
1860
1861         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
1862
1863         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1864
1865         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1866                                  sizeof(vsi_ctx->info), cd);
1867
1868         if (!status) {
1869                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
1870                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
1871                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
1872         }
1873
1874         return status;
1875 }
1876
1877 /**
1878  * ice_aq_free_vsi
1879  * @hw: pointer to the HW struct
1880  * @vsi_ctx: pointer to a VSI context struct
1881  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1882  * @cd: pointer to command details structure or NULL
1883  *
1884  * Free VSI context info from hardware (0x0213)
1885  */
1886 enum ice_status
1887 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1888                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
1889 {
1890         struct ice_aqc_add_update_free_vsi_resp *resp;
1891         struct ice_aqc_add_get_update_free_vsi *cmd;
1892         struct ice_aq_desc desc;
1893         enum ice_status status;
1894
1895         cmd = &desc.params.vsi_cmd;
1896         resp = &desc.params.add_update_free_vsi_res;
1897
1898         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
1899
1900         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1901         if (keep_vsi_alloc)
1902                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
1903
1904         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1905         if (!status) {
1906                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1907                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1908         }
1909
1910         return status;
1911 }
1912
1913 /**
1914  * ice_aq_update_vsi
1915  * @hw: pointer to the HW struct
1916  * @vsi_ctx: pointer to a VSI context struct
1917  * @cd: pointer to command details structure or NULL
1918  *
1919  * Update VSI context in the hardware (0x0211)
1920  */
1921 enum ice_status
1922 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
1923                   struct ice_sq_cd *cd)
1924 {
1925         struct ice_aqc_add_update_free_vsi_resp *resp;
1926         struct ice_aqc_add_get_update_free_vsi *cmd;
1927         struct ice_aq_desc desc;
1928         enum ice_status status;
1929
1930         cmd = &desc.params.vsi_cmd;
1931         resp = &desc.params.add_update_free_vsi_res;
1932
1933         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
1934
1935         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
1936
1937         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1938
1939         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
1940                                  sizeof(vsi_ctx->info), cd);
1941
1942         if (!status) {
1943                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
1944                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
1945         }
1946
1947         return status;
1948 }
1949
1950 /**
1951  * ice_is_vsi_valid - check whether the VSI is valid or not
1952  * @hw: pointer to the HW struct
1953  * @vsi_handle: VSI handle
1954  *
1955  * check whether the VSI is valid or not
1956  */
1957 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1958 {
1959         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1960 }
1961
1962 /**
1963  * ice_get_hw_vsi_num - return the HW VSI number
1964  * @hw: pointer to the HW struct
1965  * @vsi_handle: VSI handle
1966  *
1967  * return the HW VSI number
1968  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1969  */
1970 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1971 {
1972         return hw->vsi_ctx[vsi_handle]->vsi_num;
1973 }
1974
1975 /**
1976  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1977  * @hw: pointer to the HW struct
1978  * @vsi_handle: VSI handle
1979  *
1980  * return the VSI context entry for a given VSI handle
1981  */
1982 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1983 {
1984         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1985 }
1986
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 *
 * No bounds check is performed here; callers are expected to pass a
 * vsi_handle < ICE_MAX_VSI. The table entry takes ownership of @vsi: the
 * memory is later released by ice_clear_vsi_ctx().
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
2000
2001 /**
2002  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2003  * @hw: pointer to the HW struct
2004  * @vsi_handle: VSI handle
2005  */
2006 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2007 {
2008         struct ice_vsi_ctx *vsi;
2009         u8 i;
2010
2011         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2012         if (!vsi)
2013                 return;
2014         ice_for_each_traffic_class(i) {
2015                 if (vsi->lan_q_ctx[i]) {
2016                         ice_free(hw, vsi->lan_q_ctx[i]);
2017                         vsi->lan_q_ctx[i] = NULL;
2018                 }
2019         }
2020 }
2021
2022 /**
2023  * ice_clear_vsi_ctx - clear the VSI context entry
2024  * @hw: pointer to the HW struct
2025  * @vsi_handle: VSI handle
2026  *
2027  * clear the VSI context entry
2028  */
2029 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2030 {
2031         struct ice_vsi_ctx *vsi;
2032
2033         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2034         if (vsi) {
2035                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2036                 ice_free(hw, vsi);
2037                 hw->vsi_ctx[vsi_handle] = NULL;
2038         }
2039 }
2040
2041 /**
2042  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2043  * @hw: pointer to the HW struct
2044  */
2045 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2046 {
2047         u16 i;
2048
2049         for (i = 0; i < ICE_MAX_VSI; i++)
2050                 ice_clear_vsi_ctx(hw, i);
2051 }
2052
2053 /**
2054  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2055  * @hw: pointer to the HW struct
2056  * @vsi_handle: unique VSI handle provided by drivers
2057  * @vsi_ctx: pointer to a VSI context struct
2058  * @cd: pointer to command details structure or NULL
2059  *
2060  * Add a VSI context to the hardware also add it into the VSI handle list.
2061  * If this function gets called after reset for existing VSIs then update
2062  * with the new HW VSI number in the corresponding VSI handle list entry.
2063  */
2064 enum ice_status
2065 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2066             struct ice_sq_cd *cd)
2067 {
2068         struct ice_vsi_ctx *tmp_vsi_ctx;
2069         enum ice_status status;
2070
2071         if (vsi_handle >= ICE_MAX_VSI)
2072                 return ICE_ERR_PARAM;
2073         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2074         if (status)
2075                 return status;
2076         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2077         if (!tmp_vsi_ctx) {
2078                 /* Create a new VSI context */
2079                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2080                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2081                 if (!tmp_vsi_ctx) {
2082                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2083                         return ICE_ERR_NO_MEMORY;
2084                 }
2085                 *tmp_vsi_ctx = *vsi_ctx;
2086
2087                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2088         } else {
2089                 /* update with new HW VSI num */
2090                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2091         }
2092
2093         return ICE_SUCCESS;
2094 }
2095
2096 /**
2097  * ice_free_vsi- free VSI context from hardware and VSI handle list
2098  * @hw: pointer to the HW struct
2099  * @vsi_handle: unique VSI handle
2100  * @vsi_ctx: pointer to a VSI context struct
2101  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2102  * @cd: pointer to command details structure or NULL
2103  *
2104  * Free VSI context info from hardware as well as from VSI handle list
2105  */
2106 enum ice_status
2107 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2108              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2109 {
2110         enum ice_status status;
2111
2112         if (!ice_is_vsi_valid(hw, vsi_handle))
2113                 return ICE_ERR_PARAM;
2114         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2115         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2116         if (!status)
2117                 ice_clear_vsi_ctx(hw, vsi_handle);
2118         return status;
2119 }
2120
2121 /**
2122  * ice_update_vsi
2123  * @hw: pointer to the HW struct
2124  * @vsi_handle: unique VSI handle
2125  * @vsi_ctx: pointer to a VSI context struct
2126  * @cd: pointer to command details structure or NULL
2127  *
2128  * Update VSI context in the hardware
2129  */
2130 enum ice_status
2131 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2132                struct ice_sq_cd *cd)
2133 {
2134         if (!ice_is_vsi_valid(hw, vsi_handle))
2135                 return ICE_ERR_PARAM;
2136         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2137         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2138 }
2139
2140 /**
2141  * ice_aq_get_vsi_params
2142  * @hw: pointer to the HW struct
2143  * @vsi_ctx: pointer to a VSI context struct
2144  * @cd: pointer to command details structure or NULL
2145  *
2146  * Get VSI context info from hardware (0x0212)
2147  */
2148 enum ice_status
2149 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2150                       struct ice_sq_cd *cd)
2151 {
2152         struct ice_aqc_add_get_update_free_vsi *cmd;
2153         struct ice_aqc_get_vsi_resp *resp;
2154         struct ice_aq_desc desc;
2155         enum ice_status status;
2156
2157         cmd = &desc.params.vsi_cmd;
2158         resp = &desc.params.get_vsi_resp;
2159
2160         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2161
2162         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2163
2164         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2165                                  sizeof(vsi_ctx->info), cd);
2166         if (!status) {
2167                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2168                                         ICE_AQ_VSI_NUM_M;
2169                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2170                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2171         }
2172
2173         return status;
2174 }
2175
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: Rule ID
 *
 * Add/Update Mirror Rule (0x260).
 *
 * NOTE: *rule_id is both input and output: pass ICE_INVAL_MIRROR_RULE_ID to
 * create a new rule, or an existing ID to update that rule. On success the
 * firmware-assigned rule ID is written back to *rule_id.
 *
 * For the virtual-port rule types @count/@mr_buf describe the mirrored VSI
 * list and must be set; for the physical-port rule types they must be zero/
 * NULL (no indirect buffer is sent in that case).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
                           u16 count, struct ice_mir_rule_buf *mr_buf,
                           struct ice_sq_cd *cd, u16 *rule_id)
{
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	u16 buf_size = 0;

	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;

		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
		if (!mr_list)
			return ICE_ERR_NO_MEMORY;
		break;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		break;
	default:
		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	if (mr_buf) {
		int i;

		for (i = 0; i < count; i++) {
			u16 id;

			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

			/* Validate specified VSI number, make sure it is less
			 * than ICE_MAX_VSI, if not return with error.
			 */
			if (id >= ICE_MAX_VSI) {
				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
					  id);
				ice_free(hw, mr_list);
				return ICE_ERR_OUT_OF_RANGE;
			}

			/* add VSI to mirror rule */
			if (mr_buf[i].add)
				mr_list[i] =
					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
			else /* remove VSI from mirror rule */
				mr_list[i] = CPU_TO_LE16(id);
		}
	}

	cmd = &desc.params.add_update_rule;
	/* Tag the rule ID as valid only when updating an existing rule */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);

	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	if (!status)
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

	/* mr_list is NULL for the physical-port rule types; presumably
	 * ice_free() tolerates NULL - matches the original control flow
	 */
	ice_free(hw, mr_list);

	return status;
}
2273
2274 /**
2275  * ice_aq_delete_mir_rule - delete a mirror rule
2276  * @hw: pointer to the HW struct
2277  * @rule_id: Mirror rule ID (to be deleted)
2278  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2279  *               otherwise it is returned to the shared pool
2280  * @cd: pointer to command details structure or NULL
2281  *
2282  * Delete Mirror Rule (0x261).
2283  */
2284 enum ice_status
2285 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2286                        struct ice_sq_cd *cd)
2287 {
2288         struct ice_aqc_delete_mir_rule *cmd;
2289         struct ice_aq_desc desc;
2290
2291         /* rule_id should be in the range 0...63 */
2292         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2293                 return ICE_ERR_OUT_OF_RANGE;
2294
2295         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2296
2297         cmd = &desc.params.del_rule;
2298         rule_id |= ICE_AQC_RULE_ID_VALID_M;
2299         cmd->rule_id = CPU_TO_LE16(rule_id);
2300
2301         if (keep_allocd)
2302                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2303
2304         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2305 }
2306
2307 /**
2308  * ice_aq_alloc_free_vsi_list
2309  * @hw: pointer to the HW struct
2310  * @vsi_list_id: VSI list ID returned or used for lookup
2311  * @lkup_type: switch rule filter lookup type
2312  * @opc: switch rules population command type - pass in the command opcode
2313  *
2314  * allocates or free a VSI list resource
2315  */
2316 static enum ice_status
2317 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2318                            enum ice_sw_lkup_type lkup_type,
2319                            enum ice_adminq_opc opc)
2320 {
2321         struct ice_aqc_alloc_free_res_elem *sw_buf;
2322         struct ice_aqc_res_elem *vsi_ele;
2323         enum ice_status status;
2324         u16 buf_len;
2325
2326         buf_len = ice_struct_size(sw_buf, elem, 1);
2327         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2328         if (!sw_buf)
2329                 return ICE_ERR_NO_MEMORY;
2330         sw_buf->num_elems = CPU_TO_LE16(1);
2331
2332         if (lkup_type == ICE_SW_LKUP_MAC ||
2333             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2334             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2335             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2336             lkup_type == ICE_SW_LKUP_PROMISC ||
2337             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2338             lkup_type == ICE_SW_LKUP_LAST) {
2339                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2340         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
2341                 sw_buf->res_type =
2342                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2343         } else {
2344                 status = ICE_ERR_PARAM;
2345                 goto ice_aq_alloc_free_vsi_list_exit;
2346         }
2347
2348         if (opc == ice_aqc_opc_free_res)
2349                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2350
2351         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2352         if (status)
2353                 goto ice_aq_alloc_free_vsi_list_exit;
2354
2355         if (opc == ice_aqc_opc_alloc_res) {
2356                 vsi_ele = &sw_buf->elem[0];
2357                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2358         }
2359
2360 ice_aq_alloc_free_vsi_list_exit:
2361         ice_free(hw, sw_buf);
2362         return status;
2363 }
2364
2365 /**
2366  * ice_aq_set_storm_ctrl - Sets storm control configuration
2367  * @hw: pointer to the HW struct
2368  * @bcast_thresh: represents the upper threshold for broadcast storm control
2369  * @mcast_thresh: represents the upper threshold for multicast storm control
2370  * @ctl_bitmask: storm control control knobs
2371  *
2372  * Sets the storm control configuration (0x0280)
2373  */
2374 enum ice_status
2375 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2376                       u32 ctl_bitmask)
2377 {
2378         struct ice_aqc_storm_cfg *cmd;
2379         struct ice_aq_desc desc;
2380
2381         cmd = &desc.params.storm_conf;
2382
2383         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2384
2385         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2386         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2387         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2388
2389         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2390 }
2391
2392 /**
2393  * ice_aq_get_storm_ctrl - gets storm control configuration
2394  * @hw: pointer to the HW struct
2395  * @bcast_thresh: represents the upper threshold for broadcast storm control
2396  * @mcast_thresh: represents the upper threshold for multicast storm control
2397  * @ctl_bitmask: storm control control knobs
2398  *
2399  * Gets the storm control configuration (0x0281)
2400  */
2401 enum ice_status
2402 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2403                       u32 *ctl_bitmask)
2404 {
2405         enum ice_status status;
2406         struct ice_aq_desc desc;
2407
2408         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2409
2410         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2411         if (!status) {
2412                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2413
2414                 if (bcast_thresh)
2415                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2416                                 ICE_AQ_THRESHOLD_M;
2417                 if (mcast_thresh)
2418                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2419                                 ICE_AQ_THRESHOLD_M;
2420                 if (ctl_bitmask)
2421                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2422         }
2423
2424         return status;
2425 }
2426
2427 /**
2428  * ice_aq_sw_rules - add/update/remove switch rules
2429  * @hw: pointer to the HW struct
2430  * @rule_list: pointer to switch rule population list
2431  * @rule_list_sz: total size of the rule list in bytes
2432  * @num_rules: number of switch rules in the rule_list
2433  * @opc: switch rules population command type - pass in the command opcode
2434  * @cd: pointer to command details structure or NULL
2435  *
2436  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2437  */
2438 static enum ice_status
2439 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2440                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2441 {
2442         struct ice_aq_desc desc;
2443         enum ice_status status;
2444
2445         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2446
2447         if (opc != ice_aqc_opc_add_sw_rules &&
2448             opc != ice_aqc_opc_update_sw_rules &&
2449             opc != ice_aqc_opc_remove_sw_rules)
2450                 return ICE_ERR_PARAM;
2451
2452         ice_fill_dflt_direct_cmd_desc(&desc, opc);
2453
2454         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2455         desc.params.sw_rules.num_rules_fltr_entry_index =
2456                 CPU_TO_LE16(num_rules);
2457         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2458         if (opc != ice_aqc_opc_add_sw_rules &&
2459             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2460                 status = ICE_ERR_DOES_NOT_EXIST;
2461
2462         return status;
2463 }
2464
2465 /**
2466  * ice_aq_add_recipe - add switch recipe
2467  * @hw: pointer to the HW struct
2468  * @s_recipe_list: pointer to switch rule population list
2469  * @num_recipes: number of switch recipes in the list
2470  * @cd: pointer to command details structure or NULL
2471  *
2472  * Add(0x0290)
2473  */
2474 enum ice_status
2475 ice_aq_add_recipe(struct ice_hw *hw,
2476                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2477                   u16 num_recipes, struct ice_sq_cd *cd)
2478 {
2479         struct ice_aqc_add_get_recipe *cmd;
2480         struct ice_aq_desc desc;
2481         u16 buf_size;
2482
2483         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2484         cmd = &desc.params.add_get_recipe;
2485         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2486
2487         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2488         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2489
2490         buf_size = num_recipes * sizeof(*s_recipe_list);
2491
2492         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2493 }
2494
2495 /**
2496  * ice_aq_get_recipe - get switch recipe
2497  * @hw: pointer to the HW struct
2498  * @s_recipe_list: pointer to switch rule population list
2499  * @num_recipes: pointer to the number of recipes (input and output)
2500  * @recipe_root: root recipe number of recipe(s) to retrieve
2501  * @cd: pointer to command details structure or NULL
2502  *
2503  * Get(0x0292)
2504  *
2505  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2506  * On output, *num_recipes will equal the number of entries returned in
2507  * s_recipe_list.
2508  *
2509  * The caller must supply enough space in s_recipe_list to hold all possible
2510  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2511  */
2512 enum ice_status
2513 ice_aq_get_recipe(struct ice_hw *hw,
2514                   struct ice_aqc_recipe_data_elem *s_recipe_list,
2515                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2516 {
2517         struct ice_aqc_add_get_recipe *cmd;
2518         struct ice_aq_desc desc;
2519         enum ice_status status;
2520         u16 buf_size;
2521
2522         if (*num_recipes != ICE_MAX_NUM_RECIPES)
2523                 return ICE_ERR_PARAM;
2524
2525         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2526         cmd = &desc.params.add_get_recipe;
2527         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2528
2529         cmd->return_index = CPU_TO_LE16(recipe_root);
2530         cmd->num_sub_recipes = 0;
2531
2532         buf_size = *num_recipes * sizeof(*s_recipe_list);
2533
2534         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2535         /* cppcheck-suppress constArgument */
2536         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2537
2538         return status;
2539 }
2540
2541 /**
2542  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2543  * @hw: pointer to the HW struct
2544  * @profile_id: package profile ID to associate the recipe with
2545  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2546  * @cd: pointer to command details structure or NULL
2547  * Recipe to profile association (0x0291)
2548  */
2549 enum ice_status
2550 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2551                              struct ice_sq_cd *cd)
2552 {
2553         struct ice_aqc_recipe_to_profile *cmd;
2554         struct ice_aq_desc desc;
2555
2556         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2557         cmd = &desc.params.recipe_to_profile;
2558         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2559         cmd->profile_id = CPU_TO_LE16(profile_id);
2560         /* Set the recipe ID bit in the bitmask to let the device know which
2561          * profile we are associating the recipe to
2562          */
2563         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2564                    ICE_NONDMA_TO_NONDMA);
2565
2566         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2567 }
2568
2569 /**
2570  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2571  * @hw: pointer to the HW struct
2572  * @profile_id: package profile ID to associate the recipe with
2573  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2574  * @cd: pointer to command details structure or NULL
2575  * Associate profile ID with given recipe (0x0293)
2576  */
2577 enum ice_status
2578 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2579                              struct ice_sq_cd *cd)
2580 {
2581         struct ice_aqc_recipe_to_profile *cmd;
2582         struct ice_aq_desc desc;
2583         enum ice_status status;
2584
2585         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2586         cmd = &desc.params.recipe_to_profile;
2587         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2588         cmd->profile_id = CPU_TO_LE16(profile_id);
2589
2590         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2591         if (!status)
2592                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
2593                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2594
2595         return status;
2596 }
2597
2598 /**
2599  * ice_alloc_recipe - add recipe resource
2600  * @hw: pointer to the hardware structure
2601  * @rid: recipe ID returned as response to AQ call
2602  */
2603 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2604 {
2605         struct ice_aqc_alloc_free_res_elem *sw_buf;
2606         enum ice_status status;
2607         u16 buf_len;
2608
2609         buf_len = ice_struct_size(sw_buf, elem, 1);
2610         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2611         if (!sw_buf)
2612                 return ICE_ERR_NO_MEMORY;
2613
2614         sw_buf->num_elems = CPU_TO_LE16(1);
2615         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2616                                         ICE_AQC_RES_TYPE_S) |
2617                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
2618         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2619                                        ice_aqc_opc_alloc_res, NULL);
2620         if (!status)
2621                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2622         ice_free(hw, sw_buf);
2623
2624         return status;
2625 }
2626
2627 /* ice_init_port_info - Initialize port_info with switch configuration data
2628  * @pi: pointer to port_info
2629  * @vsi_port_num: VSI number or port number
2630  * @type: Type of switch element (port or VSI)
2631  * @swid: switch ID of the switch the element is attached to
2632  * @pf_vf_num: PF or VF number
2633  * @is_vf: true if the element is a VF, false otherwise
2634  */
2635 static void
2636 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2637                    u16 swid, u16 pf_vf_num, bool is_vf)
2638 {
2639         switch (type) {
2640         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2641                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2642                 pi->sw_id = swid;
2643                 pi->pf_vf_num = pf_vf_num;
2644                 pi->is_vf = is_vf;
2645                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2646                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2647                 break;
2648         default:
2649                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2650                 break;
2651         }
2652 }
2653
2654 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2655  * @hw: pointer to the hardware structure
2656  */
2657 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2658 {
2659         struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2660         enum ice_status status;
2661         u8 num_total_ports;
2662         u16 req_desc = 0;
2663         u16 num_elems;
2664         u8 j = 0;
2665         u16 i;
2666
2667         num_total_ports = 1;
2668
2669         rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2670                 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2671
2672         if (!rbuf)
2673                 return ICE_ERR_NO_MEMORY;
2674
2675         /* Multiple calls to ice_aq_get_sw_cfg may be required
2676          * to get all the switch configuration information. The need
2677          * for additional calls is indicated by ice_aq_get_sw_cfg
2678          * writing a non-zero value in req_desc
2679          */
2680         do {
2681                 struct ice_aqc_get_sw_cfg_resp_elem *ele;
2682
2683                 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2684                                            &req_desc, &num_elems, NULL);
2685
2686                 if (status)
2687                         break;
2688
2689                 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2690                         u16 pf_vf_num, swid, vsi_port_num;
2691                         bool is_vf = false;
2692                         u8 res_type;
2693
2694                         vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2695                                 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2696
2697                         pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2698                                 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2699
2700                         swid = LE16_TO_CPU(ele->swid);
2701
2702                         if (LE16_TO_CPU(ele->pf_vf_num) &
2703                             ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2704                                 is_vf = true;
2705
2706                         res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2707                                         ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2708
2709                         switch (res_type) {
2710                         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2711                         case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2712                                 if (j == num_total_ports) {
2713                                         ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2714                                         status = ICE_ERR_CFG;
2715                                         goto out;
2716                                 }
2717                                 ice_init_port_info(hw->port_info,
2718                                                    vsi_port_num, res_type, swid,
2719                                                    pf_vf_num, is_vf);
2720                                 j++;
2721                                 break;
2722                         default:
2723                                 break;
2724                         }
2725                 }
2726         } while (req_desc && !status);
2727
2728 out:
2729         ice_free(hw, (void *)rbuf);
2730         return status;
2731 }
2732
2733 /**
2734  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2735  * @hw: pointer to the hardware structure
2736  * @fi: filter info structure to fill/update
2737  *
2738  * This helper function populates the lb_en and lan_en elements of the provided
2739  * ice_fltr_info struct using the switch's type and characteristics of the
2740  * switch rule being configured.
2741  */
2742 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2743 {
2744         if ((fi->flag & ICE_FLTR_RX) &&
2745             (fi->fltr_act == ICE_FWD_TO_VSI ||
2746              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2747             fi->lkup_type == ICE_SW_LKUP_LAST)
2748                 fi->lan_en = true;
2749         fi->lb_en = false;
2750         fi->lan_en = false;
2751         if ((fi->flag & ICE_FLTR_TX) &&
2752             (fi->fltr_act == ICE_FWD_TO_VSI ||
2753              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2754              fi->fltr_act == ICE_FWD_TO_Q ||
2755              fi->fltr_act == ICE_FWD_TO_QGRP)) {
2756                 /* Setting LB for prune actions will result in replicated
2757                  * packets to the internal switch that will be dropped.
2758                  */
2759                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2760                         fi->lb_en = true;
2761
2762                 /* Set lan_en to TRUE if
2763                  * 1. The switch is a VEB AND
2764                  * 2
2765                  * 2.1 The lookup is a directional lookup like ethertype,
2766                  * promiscuous, ethertype-MAC, promiscuous-VLAN
2767                  * and default-port OR
2768                  * 2.2 The lookup is VLAN, OR
2769                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2770                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2771                  *
2772                  * OR
2773                  *
2774                  * The switch is a VEPA.
2775                  *
2776                  * In all other cases, the LAN enable has to be set to false.
2777                  */
2778                 if (hw->evb_veb) {
2779                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2780                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2781                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2782                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2783                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
2784                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
2785                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
2786                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
2787                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2788                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
2789                                 fi->lan_en = true;
2790                 } else {
2791                         fi->lan_en = true;
2792                 }
2793         }
2794 }
2795
2796 /**
2797  * ice_fill_sw_rule - Helper function to fill switch rule structure
2798  * @hw: pointer to the hardware structure
2799  * @f_info: entry containing packet forwarding information
2800  * @s_rule: switch rule structure to be filled in based on mac_entry
2801  * @opc: switch rules population command type - pass in the command opcode
2802  */
2803 static void
2804 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2805                  struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
2806 {
2807         u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2808         void *daddr = NULL;
2809         u16 eth_hdr_sz;
2810         u8 *eth_hdr;
2811         u32 act = 0;
2812         __be16 *off;
2813         u8 q_rgn;
2814
2815         if (opc == ice_aqc_opc_remove_sw_rules) {
2816                 s_rule->pdata.lkup_tx_rx.act = 0;
2817                 s_rule->pdata.lkup_tx_rx.index =
2818                         CPU_TO_LE16(f_info->fltr_rule_id);
2819                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
2820                 return;
2821         }
2822
2823         eth_hdr_sz = sizeof(dummy_eth_header);
2824         eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
2825
2826         /* initialize the ether header with a dummy header */
2827         ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
2828         ice_fill_sw_info(hw, f_info);
2829
2830         switch (f_info->fltr_act) {
2831         case ICE_FWD_TO_VSI:
2832                 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2833                         ICE_SINGLE_ACT_VSI_ID_M;
2834                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2835                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2836                                 ICE_SINGLE_ACT_VALID_BIT;
2837                 break;
2838         case ICE_FWD_TO_VSI_LIST:
2839                 act |= ICE_SINGLE_ACT_VSI_LIST;
2840                 act |= (f_info->fwd_id.vsi_list_id <<
2841                         ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2842                         ICE_SINGLE_ACT_VSI_LIST_ID_M;
2843                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2844                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2845                                 ICE_SINGLE_ACT_VALID_BIT;
2846                 break;
2847         case ICE_FWD_TO_Q:
2848                 act |= ICE_SINGLE_ACT_TO_Q;
2849                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2850                         ICE_SINGLE_ACT_Q_INDEX_M;
2851                 break;
2852         case ICE_DROP_PACKET:
2853                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2854                         ICE_SINGLE_ACT_VALID_BIT;
2855                 break;
2856         case ICE_FWD_TO_QGRP:
2857                 q_rgn = f_info->qgrp_size > 0 ?
2858                         (u8)ice_ilog2(f_info->qgrp_size) : 0;
2859                 act |= ICE_SINGLE_ACT_TO_Q;
2860                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2861                         ICE_SINGLE_ACT_Q_INDEX_M;
2862                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2863                         ICE_SINGLE_ACT_Q_REGION_M;
2864                 break;
2865         default:
2866                 return;
2867         }
2868
2869         if (f_info->lb_en)
2870                 act |= ICE_SINGLE_ACT_LB_ENABLE;
2871         if (f_info->lan_en)
2872                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
2873
2874         switch (f_info->lkup_type) {
2875         case ICE_SW_LKUP_MAC:
2876                 daddr = f_info->l_data.mac.mac_addr;
2877                 break;
2878         case ICE_SW_LKUP_VLAN:
2879                 vlan_id = f_info->l_data.vlan.vlan_id;
2880                 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2881                     f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2882                         act |= ICE_SINGLE_ACT_PRUNE;
2883                         act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2884                 }
2885                 break;
2886         case ICE_SW_LKUP_ETHERTYPE_MAC:
2887                 daddr = f_info->l_data.ethertype_mac.mac_addr;
2888                 /* fall-through */
2889         case ICE_SW_LKUP_ETHERTYPE:
2890                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2891                 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
2892                 break;
2893         case ICE_SW_LKUP_MAC_VLAN:
2894                 daddr = f_info->l_data.mac_vlan.mac_addr;
2895                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2896                 break;
2897         case ICE_SW_LKUP_PROMISC_VLAN:
2898                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2899                 /* fall-through */
2900         case ICE_SW_LKUP_PROMISC:
2901                 daddr = f_info->l_data.mac_vlan.mac_addr;
2902                 break;
2903         default:
2904                 break;
2905         }
2906
2907         s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
2908                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2909                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
2910
2911         /* Recipe set depending on lookup type */
2912         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
2913         s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
2914         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
2915
2916         if (daddr)
2917                 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
2918                            ICE_NONDMA_TO_NONDMA);
2919
2920         if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2921                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2922                 *off = CPU_TO_BE16(vlan_id);
2923         }
2924
2925         /* Create the switch rule with the final dummy Ethernet header */
2926         if (opc != ice_aqc_opc_update_sw_rules)
2927                 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
2928 }
2929
2930 /**
2931  * ice_add_marker_act
2932  * @hw: pointer to the hardware structure
2933  * @m_ent: the management entry for which sw marker needs to be added
2934  * @sw_marker: sw marker to tag the Rx descriptor with
2935  * @l_id: large action resource ID
2936  *
2937  * Create a large action to hold software marker and update the switch rule
2938  * entry pointed by m_ent with newly created large action
2939  */
2940 static enum ice_status
2941 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2942                    u16 sw_marker, u16 l_id)
2943 {
2944         struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
2945         /* For software marker we need 3 large actions
2946          * 1. FWD action: FWD TO VSI or VSI LIST
2947          * 2. GENERIC VALUE action to hold the profile ID
2948          * 3. GENERIC VALUE action to hold the software marker ID
2949          */
2950         const u16 num_lg_acts = 3;
2951         enum ice_status status;
2952         u16 lg_act_size;
2953         u16 rules_size;
2954         u32 act;
2955         u16 id;
2956
2957         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2958                 return ICE_ERR_PARAM;
2959
2960         /* Create two back-to-back switch rules and submit them to the HW using
2961          * one memory buffer:
2962          *    1. Large Action
2963          *    2. Look up Tx Rx
2964          */
2965         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
2966         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2967         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
2968         if (!lg_act)
2969                 return ICE_ERR_NO_MEMORY;
2970
2971         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
2972
2973         /* Fill in the first switch rule i.e. large action */
2974         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
2975         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
2976         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
2977
2978         /* First action VSI forwarding or VSI list forwarding depending on how
2979          * many VSIs
2980          */
2981         id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2982                 m_ent->fltr_info.fwd_id.hw_vsi_id;
2983
2984         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2985         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
2986                 ICE_LG_ACT_VSI_LIST_ID_M;
2987         if (m_ent->vsi_count > 1)
2988                 act |= ICE_LG_ACT_VSI_LIST;
2989         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
2990
2991         /* Second action descriptor type */
2992         act = ICE_LG_ACT_GENERIC;
2993
2994         act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2995         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
2996
2997         act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2998                ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2999
3000         /* Third action Marker value */
3001         act |= ICE_LG_ACT_GENERIC;
3002         act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3003                 ICE_LG_ACT_GENERIC_VALUE_M;
3004
3005         lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3006
3007         /* call the fill switch rule to fill the lookup Tx Rx structure */
3008         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3009                          ice_aqc_opc_update_sw_rules);
3010
3011         /* Update the action to point to the large action ID */
3012         rx_tx->pdata.lkup_tx_rx.act =
3013                 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3014                             ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3015                              ICE_SINGLE_ACT_PTR_VAL_M));
3016
3017         /* Use the filter rule ID of the previously created rule with single
3018          * act. Once the update happens, hardware will treat this as large
3019          * action
3020          */
3021         rx_tx->pdata.lkup_tx_rx.index =
3022                 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3023
3024         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3025                                  ice_aqc_opc_update_sw_rules, NULL);
3026         if (!status) {
3027                 m_ent->lg_act_idx = l_id;
3028                 m_ent->sw_marker_id = sw_marker;
3029         }
3030
3031         ice_free(hw, lg_act);
3032         return status;
3033 }
3034
3035 /**
3036  * ice_add_counter_act - add/update filter rule with counter action
3037  * @hw: pointer to the hardware structure
3038  * @m_ent: the management entry for which counter needs to be added
3039  * @counter_id: VLAN counter ID returned as part of allocate resource
3040  * @l_id: large action resource ID
3041  */
3042 static enum ice_status
3043 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3044                     u16 counter_id, u16 l_id)
3045 {
3046         struct ice_aqc_sw_rules_elem *lg_act;
3047         struct ice_aqc_sw_rules_elem *rx_tx;
3048         enum ice_status status;
3049         /* 2 actions will be added while adding a large action counter */
3050         const int num_acts = 2;
3051         u16 lg_act_size;
3052         u16 rules_size;
3053         u16 f_rule_id;
3054         u32 act;
3055         u16 id;
3056
3057         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3058                 return ICE_ERR_PARAM;
3059
3060         /* Create two back-to-back switch rules and submit them to the HW using
3061          * one memory buffer:
3062          * 1. Large Action
3063          * 2. Look up Tx Rx
3064          */
3065         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3066         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3067         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
3068                                                                  rules_size);
3069         if (!lg_act)
3070                 return ICE_ERR_NO_MEMORY;
3071
3072         rx_tx = (struct ice_aqc_sw_rules_elem *)
3073                 ((u8 *)lg_act + lg_act_size);
3074
3075         /* Fill in the first switch rule i.e. large action */
3076         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3077         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3078         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3079
3080         /* First action VSI forwarding or VSI list forwarding depending on how
3081          * many VSIs
3082          */
3083         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3084                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3085
3086         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3087         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3088                 ICE_LG_ACT_VSI_LIST_ID_M;
3089         if (m_ent->vsi_count > 1)
3090                 act |= ICE_LG_ACT_VSI_LIST;
3091         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3092
3093         /* Second action counter ID */
3094         act = ICE_LG_ACT_STAT_COUNT;
3095         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3096                 ICE_LG_ACT_STAT_COUNT_M;
3097         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3098
3099         /* call the fill switch rule to fill the lookup Tx Rx structure */
3100         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3101                          ice_aqc_opc_update_sw_rules);
3102
3103         act = ICE_SINGLE_ACT_PTR;
3104         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3105         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3106
3107         /* Use the filter rule ID of the previously created rule with single
3108          * act. Once the update happens, hardware will treat this as large
3109          * action
3110          */
3111         f_rule_id = m_ent->fltr_info.fltr_rule_id;
3112         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3113
3114         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3115                                  ice_aqc_opc_update_sw_rules, NULL);
3116         if (!status) {
3117                 m_ent->lg_act_idx = l_id;
3118                 m_ent->counter_index = counter_id;
3119         }
3120
3121         ice_free(hw, lg_act);
3122         return status;
3123 }
3124
3125 /**
3126  * ice_create_vsi_list_map
3127  * @hw: pointer to the hardware structure
3128  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3129  * @num_vsi: number of VSI handles in the array
3130  * @vsi_list_id: VSI list ID generated as part of allocate resource
3131  *
3132  * Helper function to create a new entry of VSI list ID to VSI mapping
3133  * using the given VSI list ID
3134  */
3135 static struct ice_vsi_list_map_info *
3136 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3137                         u16 vsi_list_id)
3138 {
3139         struct ice_switch_info *sw = hw->switch_info;
3140         struct ice_vsi_list_map_info *v_map;
3141         int i;
3142
3143         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
3144                 sizeof(*v_map));
3145         if (!v_map)
3146                 return NULL;
3147
3148         v_map->vsi_list_id = vsi_list_id;
3149         v_map->ref_cnt = 1;
3150         for (i = 0; i < num_vsi; i++)
3151                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3152
3153         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3154         return v_map;
3155 }
3156
/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 *
 * Returns ICE_SUCCESS on success, ICE_ERR_PARAM for an empty VSI array,
 * an unsupported lookup type, or an invalid VSI handle, and
 * ICE_ERR_NO_MEMORY if the rule buffer cannot be allocated.
 */
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;
	u16 s_rule_size;
	u16 rule_type;
	int i;

	if (!num_vsi)
		return ICE_ERR_PARAM;

	/* Map the lookup type to the AQ rule type: VLAN lookups use the
	 * prune list set/clear rules, all other supported lookups use the
	 * VSI list set/clear rules.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return ICE_ERR_PARAM;

	/* Rule element size scales with the number of VSIs in the list */
	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = ICE_ERR_PARAM;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = CPU_TO_LE16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	ice_free(hw, s_rule);
	return status;
}
3223
3224 /**
3225  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3226  * @hw: pointer to the HW struct
3227  * @vsi_handle_arr: array of VSI handles to form a VSI list
3228  * @num_vsi: number of VSI handles in the array
3229  * @vsi_list_id: stores the ID of the VSI list to be created
3230  * @lkup_type: switch rule filter's lookup type
3231  */
3232 static enum ice_status
3233 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3234                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3235 {
3236         enum ice_status status;
3237
3238         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3239                                             ice_aqc_opc_alloc_res);
3240         if (status)
3241                 return status;
3242
3243         /* Update the newly created VSI list to include the specified VSIs */
3244         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3245                                         *vsi_list_id, false,
3246                                         ice_aqc_opc_add_sw_rules, lkup_type);
3247 }
3248
/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @recp_list: corresponding filter management list
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 *
 * Returns ICE_ERR_NO_MEMORY if either allocation fails, otherwise the
 * status of the add-switch-rules AQ command. On success the firmware
 * assigned rule ID is written back into @f_entry.
 */
static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_status status;

	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	fm_entry = (struct ice_fltr_mgmt_list_entry *)
		   ice_malloc(hw, sizeof(*fm_entry));
	if (!fm_entry) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* Rule was not programmed - drop the management entry too */
		ice_free(hw, fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* Record the FW-assigned rule ID in both the caller's entry and
	 * the book keeping entry.
	 */
	f_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);

ice_create_pkt_fwd_rule_exit:
	ice_free(hw, s_rule);
	return status;
}
3310
3311 /**
3312  * ice_update_pkt_fwd_rule
3313  * @hw: pointer to the hardware structure
3314  * @f_info: filter information for switch rule
3315  *
3316  * Call AQ command to update a previously created switch rule with a
3317  * VSI list ID
3318  */
3319 static enum ice_status
3320 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3321 {
3322         struct ice_aqc_sw_rules_elem *s_rule;
3323         enum ice_status status;
3324
3325         s_rule = (struct ice_aqc_sw_rules_elem *)
3326                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3327         if (!s_rule)
3328                 return ICE_ERR_NO_MEMORY;
3329
3330         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3331
3332         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3333
3334         /* Update switch rule with new rule set to forward VSI list */
3335         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3336                                  ice_aqc_opc_update_sw_rules, NULL);
3337
3338         ice_free(hw, s_rule);
3339         return status;
3340 }
3341
3342 /**
3343  * ice_update_sw_rule_bridge_mode
3344  * @hw: pointer to the HW struct
3345  *
3346  * Updates unicast switch filter rules based on VEB/VEPA mode
3347  */
3348 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3349 {
3350         struct ice_switch_info *sw = hw->switch_info;
3351         struct ice_fltr_mgmt_list_entry *fm_entry;
3352         enum ice_status status = ICE_SUCCESS;
3353         struct LIST_HEAD_TYPE *rule_head;
3354         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3355
3356         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3357         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3358
3359         ice_acquire_lock(rule_lock);
3360         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3361                             list_entry) {
3362                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
3363                 u8 *addr = fi->l_data.mac.mac_addr;
3364
3365                 /* Update unicast Tx rules to reflect the selected
3366                  * VEB/VEPA mode
3367                  */
3368                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3369                     (fi->fltr_act == ICE_FWD_TO_VSI ||
3370                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3371                      fi->fltr_act == ICE_FWD_TO_Q ||
3372                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
3373                         status = ice_update_pkt_fwd_rule(hw, fi);
3374                         if (status)
3375                                 break;
3376                 }
3377         }
3378
3379         ice_release_lock(rule_lock);
3380
3381         return status;
3382 }
3383
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue/queue-group destinations cannot be aggregated into a VSI
	 * list; these combinations are not implemented.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Record the new destination in the book keeping entry */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a VSI list must already exist */
		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
3498
3499 /**
3500  * ice_find_rule_entry - Search a rule entry
3501  * @list_head: head of rule list
3502  * @f_info: rule information
3503  *
3504  * Helper function to search for a given rule entry
3505  * Returns pointer to entry storing the rule if found
3506  */
3507 static struct ice_fltr_mgmt_list_entry *
3508 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3509                     struct ice_fltr_info *f_info)
3510 {
3511         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3512
3513         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3514                             list_entry) {
3515                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3516                             sizeof(f_info->l_data)) &&
3517                     f_info->flag == list_itr->fltr_info.flag) {
3518                         ret = list_itr;
3519                         break;
3520                 }
3521         }
3522         return ret;
3523 }
3524
/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @recp_list: VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
 *
 * Advanced-rule recipes store their filters in a different list entry
 * type, hence the two structurally parallel branches below.
 */
static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
			u16 *vsi_list_id)
{
	struct ice_vsi_list_map_info *map_info = NULL;
	struct LIST_HEAD_TYPE *list_head;

	list_head = &recp_list->filt_rules;
	if (recp_list->adv_rule) {
		/* Advanced rules: any entry with a VSI list qualifies */
		struct ice_adv_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			if (list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	} else {
		/* Regular rules: only consider single-VSI entries, per the
		 * function's documented "VSI count 1" contract.
		 */
		struct ice_fltr_mgmt_list_entry *list_itr;

		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_fltr_mgmt_list_entry,
				    list_entry) {
			if (list_itr->vsi_count == 1 &&
			    list_itr->vsi_list_info) {
				map_info = list_itr->vsi_list_info;
				if (ice_is_bit_set(map_info->vsi_map,
						   vsi_handle)) {
					*vsi_list_id = map_info->vsi_list_id;
					return map_info;
				}
			}
		}
	}
	return NULL;
}
3577
/**
 * ice_add_rule_internal - add rule for a given lookup type
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @lport: logic port number on which function add rule
 * @f_entry: structure containing MAC forwarding information
 *
 * Adds or updates the rule lists for a given recipe
 *
 * If no rule matching @f_entry exists yet a fresh switch rule is
 * programmed; otherwise the new VSI is folded into the existing rule's
 * VSI list. Note that this function mutates @f_entry (fwd_id and src).
 */
static enum ice_status
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      u8 lport, struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
		f_entry->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;

	ice_acquire_lock(rule_lock);
	new_fltr = &f_entry->fltr_info;
	/* Rx rules are sourced from the logical port; Tx rules from the
	 * HW VSI number of the originating VSI.
	 */
	if (new_fltr->flag & ICE_FLTR_RX)
		new_fltr->src = lport;
	else if (new_fltr->flag & ICE_FLTR_TX)
		new_fltr->src =
			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!m_entry) {
		/* First subscriber to this filter: program a new rule */
		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		goto exit_add_rule_internal;
	}

	/* Rule exists: add this VSI to (or create) its VSI list */
	cur_fltr = &m_entry->fltr_info;
	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);

exit_add_rule_internal:
	ice_release_lock(rule_lock);
	return status;
}
3627
3628 /**
3629  * ice_remove_vsi_list_rule
3630  * @hw: pointer to the hardware structure
3631  * @vsi_list_id: VSI list ID generated as part of allocate resource
3632  * @lkup_type: switch rule filter lookup type
3633  *
3634  * The VSI list should be emptied before this function is called to remove the
3635  * VSI list.
3636  */
3637 static enum ice_status
3638 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3639                          enum ice_sw_lkup_type lkup_type)
3640 {
3641         /* Free the vsi_list resource that we allocated. It is assumed that the
3642          * list is empty at this point.
3643          */
3644         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3645                                             ice_aqc_opc_free_res);
3646 }
3647
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the rule's VSI list. When only one VSI is
 * left (non-VLAN lookups) the rule is converted back to a plain
 * "forward to VSI" rule and the now-unused VSI list is freed; for VLAN
 * lookups the list is freed once it becomes empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only rules that actually forward to a VSI list can be updated */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* One VSI left in a non-VLAN list: demote the rule to a direct
	 * "forward to VSI" rule so the list can be released below.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* The sole remaining VSI is the first set bit in the map */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* Free the VSI list when the rule no longer references it:
	 * non-VLAN rules were just demoted above (count == 1); VLAN prune
	 * lists are kept until fully empty (count == 0).
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3735
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 *
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Removes a single VSI's subscription to a filter. The switch rule
 * itself is only deleted from HW when no VSI references it any more.
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &recp_list->filt_rule_lock;
	ice_acquire_lock(rule_lock);
	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
					&f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Single-VSI rule: the switch rule goes away entirely */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without list info is inconsistent state */
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		ice_free(hw, s_rule);

		if (status)
			goto exit;

		LIST_DEL(&list_elem->list_entry);
		ice_free(hw, list_elem);
	}
exit:
	ice_release_lock(rule_lock);
	return status;
}
3826
3827 /**
3828  * ice_aq_get_res_alloc - get allocated resources
3829  * @hw: pointer to the HW struct
3830  * @num_entries: pointer to u16 to store the number of resource entries returned
3831  * @buf: pointer to buffer
3832  * @buf_size: size of buf
3833  * @cd: pointer to command details structure or NULL
3834  *
3835  * The caller-supplied buffer must be large enough to store the resource
3836  * information for all resource types. Each resource type is an
3837  * ice_aqc_get_res_resp_elem structure.
3838  */
3839 enum ice_status
3840 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
3841                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
3842                      struct ice_sq_cd *cd)
3843 {
3844         struct ice_aqc_get_res_alloc *resp;
3845         enum ice_status status;
3846         struct ice_aq_desc desc;
3847
3848         if (!buf)
3849                 return ICE_ERR_BAD_PTR;
3850
3851         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
3852                 return ICE_ERR_INVAL_SIZE;
3853
3854         resp = &desc.params.get_res;
3855
3856         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
3857         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3858
3859         if (!status && num_entries)
3860                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
3861
3862         return status;
3863 }
3864
3865 /**
3866  * ice_aq_get_res_descs - get allocated resource descriptors
3867  * @hw: pointer to the hardware structure
3868  * @num_entries: number of resource entries in buffer
3869  * @buf: structure to hold response data buffer
3870  * @buf_size: size of buffer
3871  * @res_type: resource type
3872  * @res_shared: is resource shared
3873  * @desc_id: input - first desc ID to start; output - next desc ID
3874  * @cd: pointer to command details structure or NULL
3875  */
3876 enum ice_status
3877 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
3878                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
3879                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
3880 {
3881         struct ice_aqc_get_allocd_res_desc *cmd;
3882         struct ice_aq_desc desc;
3883         enum ice_status status;
3884
3885         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3886
3887         cmd = &desc.params.get_res_desc;
3888
3889         if (!buf)
3890                 return ICE_ERR_PARAM;
3891
3892         if (buf_size != (num_entries * sizeof(*buf)))
3893                 return ICE_ERR_PARAM;
3894
3895         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
3896
3897         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
3898                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
3899                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
3900         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
3901
3902         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3903         if (!status)
3904                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
3905
3906         return status;
3907 }
3908
/**
 * ice_add_mac_rule - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @sw: pointer to switch info struct for which function add rule
 * @lport: logic port number on which function add rule
 *
 * Multicast entries (and unicast entries when hw->ucast_shared is true) are
 * added one at a time via ice_add_rule_internal(). Non-shared unicast
 * entries are counted in a first pass, serialized into one buffer, and
 * programmed with bulk "add switch rules" AQ calls; book-keeping list
 * entries are then created from the rule indices firmware returned.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
static enum ice_status
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		 struct ice_switch_info *sw, u8 lport)
{
	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u8 elem_sent;

	s_rule = NULL;
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	/* First pass: validate each entry; count non-shared unicast filters
	 * for the bulk update, add multicast/shared-unicast ones right away.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(rule_head,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, recp_list, lport,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	/* Lock is held until the bulk-added rules are recorded in rule_head */
	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Second pass: serialize every non-shared unicast filter into the
	 * bulk buffer, one fixed-size element per filter.
	 */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* send as many elements as fit into one AQ buffer */
		elem_sent = MIN_T(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				/* NOTE(review): rules already programmed in FW
				 * stay programmed but untracked on this path
				 */
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
4064
4065 /**
4066  * ice_add_mac - Add a MAC address based filter rule
4067  * @hw: pointer to the hardware structure
4068  * @m_list: list of MAC addresses and forwarding information
4069  *
4070  * Function add MAC rule for logical port from HW struct
4071  */
4072 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4073 {
4074         if (!m_list || !hw)
4075                 return ICE_ERR_PARAM;
4076
4077         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4078                                 hw->port_info->lport);
4079 }
4080
4081 /**
4082  * ice_add_vlan_internal - Add one VLAN based filter rule
4083  * @hw: pointer to the hardware structure
4084  * @recp_list: recipe list for which rule has to be added
4085  * @f_entry: filter entry containing one VLAN information
4086  */
4087 static enum ice_status
4088 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4089                       struct ice_fltr_list_entry *f_entry)
4090 {
4091         struct ice_fltr_mgmt_list_entry *v_list_itr;
4092         struct ice_fltr_info *new_fltr, *cur_fltr;
4093         enum ice_sw_lkup_type lkup_type;
4094         u16 vsi_list_id = 0, vsi_handle;
4095         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4096         enum ice_status status = ICE_SUCCESS;
4097
4098         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4099                 return ICE_ERR_PARAM;
4100
4101         f_entry->fltr_info.fwd_id.hw_vsi_id =
4102                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4103         new_fltr = &f_entry->fltr_info;
4104
4105         /* VLAN ID should only be 12 bits */
4106         if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4107                 return ICE_ERR_PARAM;
4108
4109         if (new_fltr->src_id != ICE_SRC_ID_VSI)
4110                 return ICE_ERR_PARAM;
4111
4112         new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4113         lkup_type = new_fltr->lkup_type;
4114         vsi_handle = new_fltr->vsi_handle;
4115         rule_lock = &recp_list->filt_rule_lock;
4116         ice_acquire_lock(rule_lock);
4117         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4118         if (!v_list_itr) {
4119                 struct ice_vsi_list_map_info *map_info = NULL;
4120
4121                 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4122                         /* All VLAN pruning rules use a VSI list. Check if
4123                          * there is already a VSI list containing VSI that we
4124                          * want to add. If found, use the same vsi_list_id for
4125                          * this new VLAN rule or else create a new list.
4126                          */
4127                         map_info = ice_find_vsi_list_entry(recp_list,
4128                                                            vsi_handle,
4129                                                            &vsi_list_id);
4130                         if (!map_info) {
4131                                 status = ice_create_vsi_list_rule(hw,
4132                                                                   &vsi_handle,
4133                                                                   1,
4134                                                                   &vsi_list_id,
4135                                                                   lkup_type);
4136                                 if (status)
4137                                         goto exit;
4138                         }
4139                         /* Convert the action to forwarding to a VSI list. */
4140                         new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4141                         new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4142                 }
4143
4144                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4145                 if (!status) {
4146                         v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4147                                                          new_fltr);
4148                         if (!v_list_itr) {
4149                                 status = ICE_ERR_DOES_NOT_EXIST;
4150                                 goto exit;
4151                         }
4152                         /* reuse VSI list for new rule and increment ref_cnt */
4153                         if (map_info) {
4154                                 v_list_itr->vsi_list_info = map_info;
4155                                 map_info->ref_cnt++;
4156                         } else {
4157                                 v_list_itr->vsi_list_info =
4158                                         ice_create_vsi_list_map(hw, &vsi_handle,
4159                                                                 1, vsi_list_id);
4160                         }
4161                 }
4162         } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4163                 /* Update existing VSI list to add new VSI ID only if it used
4164                  * by one VLAN rule.
4165                  */
4166                 cur_fltr = &v_list_itr->fltr_info;
4167                 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4168                                                  new_fltr);
4169         } else {
4170                 /* If VLAN rule exists and VSI list being used by this rule is
4171                  * referenced by more than 1 VLAN rule. Then create a new VSI
4172                  * list appending previous VSI with new VSI and update existing
4173                  * VLAN rule to point to new VSI list ID
4174                  */
4175                 struct ice_fltr_info tmp_fltr;
4176                 u16 vsi_handle_arr[2];
4177                 u16 cur_handle;
4178
4179                 /* Current implementation only supports reusing VSI list with
4180                  * one VSI count. We should never hit below condition
4181                  */
4182                 if (v_list_itr->vsi_count > 1 &&
4183                     v_list_itr->vsi_list_info->ref_cnt > 1) {
4184                         ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4185                         status = ICE_ERR_CFG;
4186                         goto exit;
4187                 }
4188
4189                 cur_handle =
4190                         ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4191                                            ICE_MAX_VSI);
4192
4193                 /* A rule already exists with the new VSI being added */
4194                 if (cur_handle == vsi_handle) {
4195                         status = ICE_ERR_ALREADY_EXISTS;
4196                         goto exit;
4197                 }
4198
4199                 vsi_handle_arr[0] = cur_handle;
4200                 vsi_handle_arr[1] = vsi_handle;
4201                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4202                                                   &vsi_list_id, lkup_type);
4203                 if (status)
4204                         goto exit;
4205
4206                 tmp_fltr = v_list_itr->fltr_info;
4207                 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4208                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4209                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4210                 /* Update the previous switch rule to a new VSI list which
4211                  * includes current VSI that is requested
4212                  */
4213                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4214                 if (status)
4215                         goto exit;
4216
4217                 /* before overriding VSI list map info. decrement ref_cnt of
4218                  * previous VSI list
4219                  */
4220                 v_list_itr->vsi_list_info->ref_cnt--;
4221
4222                 /* now update to newly created list */
4223                 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4224                 v_list_itr->vsi_list_info =
4225                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4226                                                 vsi_list_id);
4227                 v_list_itr->vsi_count++;
4228         }
4229
4230 exit:
4231         ice_release_lock(rule_lock);
4232         return status;
4233 }
4234
4235 /**
4236  * ice_add_vlan_rule - Add VLAN based filter rule
4237  * @hw: pointer to the hardware structure
4238  * @v_list: list of VLAN entries and forwarding information
4239  * @sw: pointer to switch info struct for which function add rule
4240  */
4241 static enum ice_status
4242 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4243                   struct ice_switch_info *sw)
4244 {
4245         struct ice_fltr_list_entry *v_list_itr;
4246         struct ice_sw_recipe *recp_list;
4247
4248         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4249         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4250                             list_entry) {
4251                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4252                         return ICE_ERR_PARAM;
4253                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4254                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4255                                                            v_list_itr);
4256                 if (v_list_itr->status)
4257                         return v_list_itr->status;
4258         }
4259         return ICE_SUCCESS;
4260 }
4261
4262 /**
4263  * ice_add_vlan - Add a VLAN based filter rule
4264  * @hw: pointer to the hardware structure
4265  * @v_list: list of VLAN and forwarding information
4266  *
4267  * Function add VLAN rule for logical port from HW struct
4268  */
4269 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4270 {
4271         if (!v_list || !hw)
4272                 return ICE_ERR_PARAM;
4273
4274         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4275 }
4276
4277 /**
4278  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4279  * @hw: pointer to the hardware structure
4280  * @mv_list: list of MAC and VLAN filters
4281  * @sw: pointer to switch info struct for which function add rule
4282  * @lport: logic port number on which function add rule
4283  *
4284  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4285  * pruning bits enabled, then it is the responsibility of the caller to make
4286  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4287  * VLAN won't be received on that VSI otherwise.
4288  */
4289 static enum ice_status
4290 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4291                       struct ice_switch_info *sw, u8 lport)
4292 {
4293         struct ice_fltr_list_entry *mv_list_itr;
4294         struct ice_sw_recipe *recp_list;
4295
4296         if (!mv_list || !hw)
4297                 return ICE_ERR_PARAM;
4298
4299         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4300         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4301                             list_entry) {
4302                 enum ice_sw_lkup_type l_type =
4303                         mv_list_itr->fltr_info.lkup_type;
4304
4305                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4306                         return ICE_ERR_PARAM;
4307                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4308                 mv_list_itr->status =
4309                         ice_add_rule_internal(hw, recp_list, lport,
4310                                               mv_list_itr);
4311                 if (mv_list_itr->status)
4312                         return mv_list_itr->status;
4313         }
4314         return ICE_SUCCESS;
4315 }
4316
4317 /**
4318  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4319  * @hw: pointer to the hardware structure
4320  * @mv_list: list of MAC VLAN addresses and forwarding information
4321  *
4322  * Function add MAC VLAN rule for logical port from HW struct
4323  */
4324 enum ice_status
4325 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4326 {
4327         if (!mv_list || !hw)
4328                 return ICE_ERR_PARAM;
4329
4330         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4331                                      hw->port_info->lport);
4332 }
4333
4334 /**
4335  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4336  * @hw: pointer to the hardware structure
4337  * @em_list: list of ether type MAC filter, MAC is optional
4338  * @sw: pointer to switch info struct for which function add rule
4339  * @lport: logic port number on which function add rule
4340  *
4341  * This function requires the caller to populate the entries in
4342  * the filter list with the necessary fields (including flags to
4343  * indicate Tx or Rx rules).
4344  */
4345 static enum ice_status
4346 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4347                      struct ice_switch_info *sw, u8 lport)
4348 {
4349         struct ice_fltr_list_entry *em_list_itr;
4350
4351         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4352                             list_entry) {
4353                 struct ice_sw_recipe *recp_list;
4354                 enum ice_sw_lkup_type l_type;
4355
4356                 l_type = em_list_itr->fltr_info.lkup_type;
4357                 recp_list = &sw->recp_list[l_type];
4358
4359                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4360                     l_type != ICE_SW_LKUP_ETHERTYPE)
4361                         return ICE_ERR_PARAM;
4362
4363                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4364                                                             lport,
4365                                                             em_list_itr);
4366                 if (em_list_itr->status)
4367                         return em_list_itr->status;
4368         }
4369         return ICE_SUCCESS;
4370 }
4371
4372 /**
4373  * ice_add_eth_mac - Add a ethertype based filter rule
4374  * @hw: pointer to the hardware structure
4375  * @em_list: list of ethertype and forwarding information
4376  *
4377  * Function add ethertype rule for logical port from HW struct
4378  */
4379 enum ice_status
4380 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4381 {
4382         if (!em_list || !hw)
4383                 return ICE_ERR_PARAM;
4384
4385         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4386                                     hw->port_info->lport);
4387 }
4388
4389 /**
4390  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4391  * @hw: pointer to the hardware structure
4392  * @em_list: list of ethertype or ethertype MAC entries
4393  * @sw: pointer to switch info struct for which function add rule
4394  */
4395 static enum ice_status
4396 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4397                         struct ice_switch_info *sw)
4398 {
4399         struct ice_fltr_list_entry *em_list_itr, *tmp;
4400
4401         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4402                                  list_entry) {
4403                 struct ice_sw_recipe *recp_list;
4404                 enum ice_sw_lkup_type l_type;
4405
4406                 l_type = em_list_itr->fltr_info.lkup_type;
4407
4408                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4409                     l_type != ICE_SW_LKUP_ETHERTYPE)
4410                         return ICE_ERR_PARAM;
4411
4412                 recp_list = &sw->recp_list[l_type];
4413                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4414                                                                em_list_itr);
4415                 if (em_list_itr->status)
4416                         return em_list_itr->status;
4417         }
4418         return ICE_SUCCESS;
4419 }
4420
4421 /**
4422  * ice_remove_eth_mac - remove a ethertype based filter rule
4423  * @hw: pointer to the hardware structure
4424  * @em_list: list of ethertype and forwarding information
4425  *
4426  */
4427 enum ice_status
4428 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4429 {
4430         if (!em_list || !hw)
4431                 return ICE_ERR_PARAM;
4432
4433         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4434 }
4435
4436 /**
4437  * ice_rem_sw_rule_info
4438  * @hw: pointer to the hardware structure
4439  * @rule_head: pointer to the switch list structure that we want to delete
4440  */
4441 static void
4442 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4443 {
4444         if (!LIST_EMPTY(rule_head)) {
4445                 struct ice_fltr_mgmt_list_entry *entry;
4446                 struct ice_fltr_mgmt_list_entry *tmp;
4447
4448                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4449                                          ice_fltr_mgmt_list_entry, list_entry) {
4450                         LIST_DEL(&entry->list_entry);
4451                         ice_free(hw, entry);
4452                 }
4453         }
4454 }
4455
4456 /**
4457  * ice_rem_adv_rule_info
4458  * @hw: pointer to the hardware structure
4459  * @rule_head: pointer to the switch list structure that we want to delete
4460  */
4461 static void
4462 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4463 {
4464         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4465         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4466
4467         if (LIST_EMPTY(rule_head))
4468                 return;
4469
4470         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4471                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
4472                 LIST_DEL(&lst_itr->list_entry);
4473                 ice_free(hw, lst_itr->lkups);
4474                 ice_free(hw, lst_itr);
4475         }
4476 }
4477
4478 /**
4479  * ice_rem_all_sw_rules_info
4480  * @hw: pointer to the hardware structure
4481  */
4482 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4483 {
4484         struct ice_switch_info *sw = hw->switch_info;
4485         u8 i;
4486
4487         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4488                 struct LIST_HEAD_TYPE *rule_head;
4489
4490                 rule_head = &sw->recp_list[i].filt_rules;
4491                 if (!sw->recp_list[i].adv_rule)
4492                         ice_rem_sw_rule_info(hw, rule_head);
4493                 else
4494                         ice_rem_adv_rule_info(hw, rule_head);
4495                 if (sw->recp_list[i].adv_rule &&
4496                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
4497                         sw->recp_list[i].adv_rule = false;
4498         }
4499 }
4500
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * On a successful add, the firmware-assigned rule index is cached in @pi
 * (per direction) so a later clear can reference it; on a successful
 * remove, the cached index and VSI number are reset to invalid markers.
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
		 u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* add needs the full header-carrying element; remove only needs
	 * the headerless element referencing the rule index
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	/* Rx rules are sourced from the port; Tx rules from the VSI. For a
	 * remove, reuse the rule ID cached when the rule was added.
	 */
	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	/* only update the cached state for a successful Rx/Tx rule change */
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* cache the index firmware assigned to the new rule */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		}
	} else {
		/* rule removed - invalidate the cached state */
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	ice_free(hw, s_rule);
	return status;
}
4588
4589 /**
4590  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4591  * @list_head: head of rule list
4592  * @f_info: rule information
4593  *
4594  * Helper function to search for a unicast rule entry - this is to be used
4595  * to remove unicast MAC filter that is not shared with other VSIs on the
4596  * PF switch.
4597  *
4598  * Returns pointer to entry storing the rule if found
4599  */
4600 static struct ice_fltr_mgmt_list_entry *
4601 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4602                           struct ice_fltr_info *f_info)
4603 {
4604         struct ice_fltr_mgmt_list_entry *list_itr;
4605
4606         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4607                             list_entry) {
4608                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4609                             sizeof(f_info->l_data)) &&
4610                     f_info->fwd_id.hw_vsi_id ==
4611                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
4612                     f_info->flag == list_itr->fltr_info.flag)
4613                         return list_itr;
4614         }
4615         return NULL;
4616 }
4617
/**
 * ice_remove_mac_rule - remove a MAC based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 * @recp_list: list from which function remove MAC address
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
 * ice_add_mac. Caller should be aware that this call will only work if all
 * the entries passed into m_list were added previously. It will not attempt to
 * do a partial remove of entries that were found.
 */
static enum ice_status
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
		    struct ice_sw_recipe *recp_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return ICE_ERR_PARAM;

	rule_lock = &recp_list->filt_rule_lock;
	/* SAFE iteration: ice_remove_rule_internal() may unlink list_itr */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
				 list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			ice_acquire_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
						       &list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			}
			/* lock released before the removal call, which takes
			 * the lock itself
			 */
			ice_release_lock(rule_lock);
		}
		list_itr->status = ice_remove_rule_internal(hw, recp_list,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return ICE_SUCCESS;
}
4678
4679 /**
4680  * ice_remove_mac - remove a MAC address based filter rule
4681  * @hw: pointer to the hardware structure
4682  * @m_list: list of MAC addresses and forwarding information
4683  *
4684  */
4685 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4686 {
4687         struct ice_sw_recipe *recp_list;
4688
4689         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4690         return ice_remove_mac_rule(hw, m_list, recp_list);
4691 }
4692
4693 /**
4694  * ice_remove_vlan_rule - Remove VLAN based filter rule
4695  * @hw: pointer to the hardware structure
4696  * @v_list: list of VLAN entries and forwarding information
4697  * @recp_list: list from which function remove VLAN
4698  */
4699 static enum ice_status
4700 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4701                      struct ice_sw_recipe *recp_list)
4702 {
4703         struct ice_fltr_list_entry *v_list_itr, *tmp;
4704
4705         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4706                                  list_entry) {
4707                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4708
4709                 if (l_type != ICE_SW_LKUP_VLAN)
4710                         return ICE_ERR_PARAM;
4711                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4712                                                               v_list_itr);
4713                 if (v_list_itr->status)
4714                         return v_list_itr->status;
4715         }
4716         return ICE_SUCCESS;
4717 }
4718
4719 /**
4720  * ice_remove_vlan - remove a VLAN address based filter rule
4721  * @hw: pointer to the hardware structure
4722  * @v_list: list of VLAN and forwarding information
4723  *
4724  */
4725 enum ice_status
4726 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4727 {
4728         struct ice_sw_recipe *recp_list;
4729
4730         if (!v_list || !hw)
4731                 return ICE_ERR_PARAM;
4732
4733         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4734         return ice_remove_vlan_rule(hw, v_list, recp_list);
4735 }
4736
4737 /**
4738  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4739  * @hw: pointer to the hardware structure
4740  * @v_list: list of MAC VLAN entries and forwarding information
4741  * @recp_list: list from which function remove MAC VLAN
4742  */
4743 static enum ice_status
4744 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4745                          struct ice_sw_recipe *recp_list)
4746 {
4747         struct ice_fltr_list_entry *v_list_itr, *tmp;
4748
4749         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4750         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4751                                  list_entry) {
4752                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4753
4754                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
4755                         return ICE_ERR_PARAM;
4756                 v_list_itr->status =
4757                         ice_remove_rule_internal(hw, recp_list,
4758                                                  v_list_itr);
4759                 if (v_list_itr->status)
4760                         return v_list_itr->status;
4761         }
4762         return ICE_SUCCESS;
4763 }
4764
4765 /**
4766  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4767  * @hw: pointer to the hardware structure
4768  * @mv_list: list of MAC VLAN and forwarding information
4769  */
4770 enum ice_status
4771 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4772 {
4773         struct ice_sw_recipe *recp_list;
4774
4775         if (!mv_list || !hw)
4776                 return ICE_ERR_PARAM;
4777
4778         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4779         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
4780 }
4781
4782 /**
4783  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4784  * @fm_entry: filter entry to inspect
4785  * @vsi_handle: VSI handle to compare with filter info
4786  */
4787 static bool
4788 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4789 {
4790         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4791                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4792                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4793                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
4794                                  vsi_handle))));
4795 }
4796
4797 /**
4798  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4799  * @hw: pointer to the hardware structure
4800  * @vsi_handle: VSI handle to remove filters from
4801  * @vsi_list_head: pointer to the list to add entry to
4802  * @fi: pointer to fltr_info of filter entry to copy & add
4803  *
4804  * Helper function, used when creating a list of filters to remove from
4805  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4806  * original filter entry, with the exception of fltr_info.fltr_act and
4807  * fltr_info.fwd_id fields. These are set such that later logic can
4808  * extract which VSI to remove the fltr from, and pass on that information.
4809  */
4810 static enum ice_status
4811 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4812                                struct LIST_HEAD_TYPE *vsi_list_head,
4813                                struct ice_fltr_info *fi)
4814 {
4815         struct ice_fltr_list_entry *tmp;
4816
4817         /* this memory is freed up in the caller function
4818          * once filters for this VSI are removed
4819          */
4820         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
4821         if (!tmp)
4822                 return ICE_ERR_NO_MEMORY;
4823
4824         tmp->fltr_info = *fi;
4825
4826         /* Overwrite these fields to indicate which VSI to remove filter from,
4827          * so find and remove logic can extract the information from the
4828          * list entries. Note that original entries will still have proper
4829          * values.
4830          */
4831         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4832         tmp->fltr_info.vsi_handle = vsi_handle;
4833         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4834
4835         LIST_ADD(&tmp->list_entry, vsi_list_head);
4836
4837         return ICE_SUCCESS;
4838 }
4839
4840 /**
4841  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4842  * @hw: pointer to the hardware structure
4843  * @vsi_handle: VSI handle to remove filters from
4844  * @lkup_list_head: pointer to the list that has certain lookup type filters
4845  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4846  *
4847  * Locates all filters in lkup_list_head that are used by the given VSI,
4848  * and adds COPIES of those entries to vsi_list_head (intended to be used
4849  * to remove the listed filters).
4850  * Note that this means all entries in vsi_list_head must be explicitly
4851  * deallocated by the caller when done with list.
4852  */
4853 static enum ice_status
4854 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4855                          struct LIST_HEAD_TYPE *lkup_list_head,
4856                          struct LIST_HEAD_TYPE *vsi_list_head)
4857 {
4858         struct ice_fltr_mgmt_list_entry *fm_entry;
4859         enum ice_status status = ICE_SUCCESS;
4860
4861         /* check to make sure VSI ID is valid and within boundary */
4862         if (!ice_is_vsi_valid(hw, vsi_handle))
4863                 return ICE_ERR_PARAM;
4864
4865         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
4866                             ice_fltr_mgmt_list_entry, list_entry) {
4867                 struct ice_fltr_info *fi;
4868
4869                 fi = &fm_entry->fltr_info;
4870                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
4871                         continue;
4872
4873                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4874                                                         vsi_list_head, fi);
4875                 if (status)
4876                         return status;
4877         }
4878         return status;
4879 }
4880
4881 /**
4882  * ice_determine_promisc_mask
4883  * @fi: filter info to parse
4884  *
4885  * Helper function to determine which ICE_PROMISC_ mask corresponds
4886  * to given filter into.
4887  */
4888 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4889 {
4890         u16 vid = fi->l_data.mac_vlan.vlan_id;
4891         u8 *macaddr = fi->l_data.mac.mac_addr;
4892         bool is_tx_fltr = false;
4893         u8 promisc_mask = 0;
4894
4895         if (fi->flag == ICE_FLTR_TX)
4896                 is_tx_fltr = true;
4897
4898         if (IS_BROADCAST_ETHER_ADDR(macaddr))
4899                 promisc_mask |= is_tx_fltr ?
4900                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4901         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
4902                 promisc_mask |= is_tx_fltr ?
4903                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4904         else if (IS_UNICAST_ETHER_ADDR(macaddr))
4905                 promisc_mask |= is_tx_fltr ?
4906                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4907         if (vid)
4908                 promisc_mask |= is_tx_fltr ?
4909                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4910
4911         return promisc_mask;
4912 }
4913
/**
 * _ice_get_vsi_promisc - get promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI (this variant always writes 0 here;
 *       only the VLAN flavor of this helper reports a VID)
 * @sw: pointer to switch info struct for which function add rule
 *
 * Walks the ICE_SW_LKUP_PROMISC rule list under its lock and ORs together
 * the promisc bits of every rule that applies to @vsi_handle.
 */
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
		     u16 *vid, struct ice_switch_info *sw)
{
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	*vid = 0;
	*promisc_mask = 0;
	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	}
	ice_release_lock(rule_lock);

	return ICE_SUCCESS;
}
4953
4954 /**
4955  * ice_get_vsi_promisc - get promiscuous mode of given VSI
4956  * @hw: pointer to the hardware structure
4957  * @vsi_handle: VSI handle to retrieve info from
4958  * @promisc_mask: pointer to mask to be filled in
4959  * @vid: VLAN ID of promisc VLAN VSI
4960  */
4961 enum ice_status
4962 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
4963                     u16 *vid)
4964 {
4965         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
4966                                     vid, hw->switch_info);
4967 }
4968
/**
 * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to retrieve info from
 * @promisc_mask: pointer to mask to be filled in
 * @vid: VLAN ID of promisc VLAN VSI
 * @sw: pointer to switch info struct for which function add rule
 *
 * Walks the ICE_SW_LKUP_PROMISC_VLAN rule list under its lock and ORs
 * together the promisc bits of every rule that applies to @vsi_handle.
 */
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
			  u16 *vid, struct ice_switch_info *sw)
{
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	*vid = 0;
	*promisc_mask = 0;
	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		/* Continue if this filter doesn't apply to this VSI or the
		 * VSI ID is not in the VSI map for this filter
		 */
		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;

		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
	}
	ice_release_lock(rule_lock);

	return ICE_SUCCESS;
}
5008
5009 /**
5010  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5011  * @hw: pointer to the hardware structure
5012  * @vsi_handle: VSI handle to retrieve info from
5013  * @promisc_mask: pointer to mask to be filled in
5014  * @vid: VLAN ID of promisc VLAN VSI
5015  */
5016 enum ice_status
5017 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5018                          u16 *vid)
5019 {
5020         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5021                                          vid, hw->switch_info);
5022 }
5023
5024 /**
5025  * ice_remove_promisc - Remove promisc based filter rules
5026  * @hw: pointer to the hardware structure
5027  * @recp_id: recipe ID for which the rule needs to removed
5028  * @v_list: list of promisc entries
5029  */
5030 static enum ice_status
5031 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5032                    struct LIST_HEAD_TYPE *v_list)
5033 {
5034         struct ice_fltr_list_entry *v_list_itr, *tmp;
5035         struct ice_sw_recipe *recp_list;
5036
5037         recp_list = &hw->switch_info->recp_list[recp_id];
5038         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5039                                  list_entry) {
5040                 v_list_itr->status =
5041                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5042                 if (v_list_itr->status)
5043                         return v_list_itr->status;
5044         }
5045         return ICE_SUCCESS;
5046 }
5047
/**
 * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 * @sw: pointer to switch info struct for which function add rule
 *
 * Builds a temporary list of COPIES of the matching rules (while holding
 * the rule lock), then removes them outside the lock. The copies are
 * always freed before returning, on success and on error.
 */
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		       u16 vid, struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct LIST_HEAD_TYPE remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promisc bits live in a separate recipe from plain promisc */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	ice_acquire_lock(rule_lock);
	LIST_FOR_EACH_ENTRY(itr, rule_head,
			    ice_fltr_mgmt_list_entry, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		/* For VLAN promisc, only rules carrying the requested VID
		 * are candidates for removal.
		 */
		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			ice_release_lock(rule_lock);
			goto free_fltr_list;
		}
	}
	ice_release_lock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* Free the temporary copies regardless of outcome */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}

	return status;
}
5122
5123 /**
5124  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5125  * @hw: pointer to the hardware structure
5126  * @vsi_handle: VSI handle to clear mode
5127  * @promisc_mask: mask of promiscuous config bits to clear
5128  * @vid: VLAN ID to clear VLAN promiscuous
5129  */
5130 enum ice_status
5131 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5132                       u8 promisc_mask, u16 vid)
5133 {
5134         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5135                                       vid, hw->switch_info);
5136 }
5137
/**
 * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * One switch rule is installed per direction/packet-type combination in
 * @promisc_mask; the mask bits are consumed (cleared) one combination per
 * loop iteration until none remain.
 */
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		     u16 vid, u8 lport, struct ice_switch_info *sw)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = ICE_SUCCESS;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);

	/* VLAN promisc uses its own recipe and carries the VLAN ID in the
	 * lookup data; plain promisc does not.
	 */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		struct ice_sw_recipe *recp_list;
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* Pick exactly one UCAST/MCAST/BCAST x RX/TX bit per pass */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;
		recp_list = &sw->recp_list[recipe_id];

		status = ice_add_rule_internal(hw, recp_list, lport,
					       &f_list_entry);
		if (status != ICE_SUCCESS)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}
5258
5259 /**
5260  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5261  * @hw: pointer to the hardware structure
5262  * @vsi_handle: VSI handle to configure
5263  * @promisc_mask: mask of promiscuous config bits
5264  * @vid: VLAN ID to set VLAN promiscuous
5265  */
5266 enum ice_status
5267 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5268                     u16 vid)
5269 {
5270         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5271                                     hw->port_info->lport,
5272                                     hw->switch_info);
5273 }
5274
/**
 * _ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 * @lport: logical port number to configure promisc mode
 * @sw: pointer to switch info struct for which function add rule
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Snapshots the VSI's VLAN filters into a temporary list (copies, freed
 * before returning), then sets or clears promisc mode per VLAN ID.
 */
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			  bool rm_vlan_promisc, u8 lport,
			  struct ice_switch_info *sw)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Copy under the lock; the per-VLAN work below runs unlocked */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
							 promisc_mask,
							 vlan_id, sw);
		else
			status =  _ice_set_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id,
						       lport, sw);
		/* Stop at the first failure; copies are still freed below */
		if (status)
			break;
	}

free_fltr_list:
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
5331
5332 /**
5333  * ice_set_vlan_vsi_promisc
5334  * @hw: pointer to the hardware structure
5335  * @vsi_handle: VSI handle to configure
5336  * @promisc_mask: mask of promiscuous config bits
5337  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5338  *
5339  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5340  */
5341 enum ice_status
5342 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5343                          bool rm_vlan_promisc)
5344 {
5345         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5346                                          rm_vlan_promisc, hw->port_info->lport,
5347                                          hw->switch_info);
5348 }
5349
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @recp_list: recipe list from which function remove fltr
 * @lkup: switch rule filter lookup type
 *
 * Snapshots all filters of @lkup type used by the VSI into a temporary
 * list (copies), dispatches to the matching per-type remove routine, and
 * frees the copies before returning.
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 struct ice_sw_recipe *recp_list,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &recp_list[lkup].filt_rule_lock;
	rule_head = &recp_list[lkup].filt_rules;
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		return;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		/* lkup doubles as the recipe ID for promisc removal */
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		ice_remove_mac_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

	/* Free the snapshot copies regardless of which branch ran */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
5411
5412 /**
5413  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5414  * @hw: pointer to the hardware structure
5415  * @vsi_handle: VSI handle to remove filters from
5416  * @sw: pointer to switch info struct
5417  */
5418 static void
5419 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5420                          struct ice_switch_info *sw)
5421 {
5422         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5423
5424         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5425                                  sw->recp_list, ICE_SW_LKUP_MAC);
5426         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5427                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5428         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5429                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
5430         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5431                                  sw->recp_list, ICE_SW_LKUP_VLAN);
5432         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5433                                  sw->recp_list, ICE_SW_LKUP_DFLT);
5434         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5435                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5436         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5437                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5438         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5439                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5440 }
5441
5442 /**
5443  * ice_remove_vsi_fltr - Remove all filters for a VSI
5444  * @hw: pointer to the hardware structure
5445  * @vsi_handle: VSI handle to remove filters from
5446  */
5447 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5448 {
5449         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5450 }
5451
5452 /**
5453  * ice_alloc_res_cntr - allocating resource counter
5454  * @hw: pointer to the hardware structure
5455  * @type: type of resource
5456  * @alloc_shared: if set it is shared else dedicated
5457  * @num_items: number of entries requested for FD resource type
5458  * @counter_id: counter index returned by AQ call
5459  */
5460 enum ice_status
5461 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5462                    u16 *counter_id)
5463 {
5464         struct ice_aqc_alloc_free_res_elem *buf;
5465         enum ice_status status;
5466         u16 buf_len;
5467
5468         /* Allocate resource */
5469         buf_len = ice_struct_size(buf, elem, 1);
5470         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5471         if (!buf)
5472                 return ICE_ERR_NO_MEMORY;
5473
5474         buf->num_elems = CPU_TO_LE16(num_items);
5475         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5476                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5477
5478         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5479                                        ice_aqc_opc_alloc_res, NULL);
5480         if (status)
5481                 goto exit;
5482
5483         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5484
5485 exit:
5486         ice_free(hw, buf);
5487         return status;
5488 }
5489
5490 /**
5491  * ice_free_res_cntr - free resource counter
5492  * @hw: pointer to the hardware structure
5493  * @type: type of resource
5494  * @alloc_shared: if set it is shared else dedicated
5495  * @num_items: number of entries to be freed for FD resource type
5496  * @counter_id: counter ID resource which needs to be freed
5497  */
5498 enum ice_status
5499 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5500                   u16 counter_id)
5501 {
5502         struct ice_aqc_alloc_free_res_elem *buf;
5503         enum ice_status status;
5504         u16 buf_len;
5505
5506         /* Free resource */
5507         buf_len = ice_struct_size(buf, elem, 1);
5508         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5509         if (!buf)
5510                 return ICE_ERR_NO_MEMORY;
5511
5512         buf->num_elems = CPU_TO_LE16(num_items);
5513         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5514                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
5515         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5516
5517         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5518                                        ice_aqc_opc_free_res, NULL);
5519         if (status)
5520                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5521
5522         ice_free(hw, buf);
5523         return status;
5524 }
5525
5526 /**
5527  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5528  * @hw: pointer to the hardware structure
5529  * @counter_id: returns counter index
5530  */
5531 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5532 {
5533         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5534                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5535                                   counter_id);
5536 }
5537
5538 /**
5539  * ice_free_vlan_res_counter - Free counter resource for VLAN type
5540  * @hw: pointer to the hardware structure
5541  * @counter_id: counter index to be freed
5542  */
5543 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5544 {
5545         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5546                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5547                                  counter_id);
5548 }
5549
5550 /**
5551  * ice_alloc_res_lg_act - add large action resource
5552  * @hw: pointer to the hardware structure
5553  * @l_id: large action ID to fill it in
5554  * @num_acts: number of actions to hold with a large action entry
5555  */
5556 static enum ice_status
5557 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5558 {
5559         struct ice_aqc_alloc_free_res_elem *sw_buf;
5560         enum ice_status status;
5561         u16 buf_len;
5562
5563         if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5564                 return ICE_ERR_PARAM;
5565
5566         /* Allocate resource for large action */
5567         buf_len = ice_struct_size(sw_buf, elem, 1);
5568         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5569         if (!sw_buf)
5570                 return ICE_ERR_NO_MEMORY;
5571
5572         sw_buf->num_elems = CPU_TO_LE16(1);
5573
5574         /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5575          * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5576          * If num_acts is greater than 2, then use
5577          * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5578          * The num_acts cannot exceed 4. This was ensured at the
5579          * beginning of the function.
5580          */
5581         if (num_acts == 1)
5582                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5583         else if (num_acts == 2)
5584                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5585         else
5586                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5587
5588         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5589                                        ice_aqc_opc_alloc_res, NULL);
5590         if (!status)
5591                 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5592
5593         ice_free(hw, sw_buf);
5594         return status;
5595 }
5596
5597 /**
5598  * ice_add_mac_with_sw_marker - add filter with sw marker
5599  * @hw: pointer to the hardware structure
5600  * @f_info: filter info structure containing the MAC filter information
5601  * @sw_marker: sw marker to tag the Rx descriptor with
5602  */
5603 enum ice_status
5604 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5605                            u16 sw_marker)
5606 {
5607         struct ice_fltr_mgmt_list_entry *m_entry;
5608         struct ice_fltr_list_entry fl_info;
5609         struct ice_sw_recipe *recp_list;
5610         struct LIST_HEAD_TYPE l_head;
5611         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5612         enum ice_status ret;
5613         bool entry_exists;
5614         u16 lg_act_id;
5615
5616         if (f_info->fltr_act != ICE_FWD_TO_VSI)
5617                 return ICE_ERR_PARAM;
5618
5619         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5620                 return ICE_ERR_PARAM;
5621
5622         if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5623                 return ICE_ERR_PARAM;
5624
5625         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5626                 return ICE_ERR_PARAM;
5627         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5628
5629         /* Add filter if it doesn't exist so then the adding of large
5630          * action always results in update
5631          */
5632
5633         INIT_LIST_HEAD(&l_head);
5634         fl_info.fltr_info = *f_info;
5635         LIST_ADD(&fl_info.list_entry, &l_head);
5636
5637         entry_exists = false;
5638         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5639                                hw->port_info->lport);
5640         if (ret == ICE_ERR_ALREADY_EXISTS)
5641                 entry_exists = true;
5642         else if (ret)
5643                 return ret;
5644
5645         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5646         rule_lock = &recp_list->filt_rule_lock;
5647         ice_acquire_lock(rule_lock);
5648         /* Get the book keeping entry for the filter */
5649         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5650         if (!m_entry)
5651                 goto exit_error;
5652
5653         /* If counter action was enabled for this rule then don't enable
5654          * sw marker large action
5655          */
5656         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5657                 ret = ICE_ERR_PARAM;
5658                 goto exit_error;
5659         }
5660
5661         /* if same marker was added before */
5662         if (m_entry->sw_marker_id == sw_marker) {
5663                 ret = ICE_ERR_ALREADY_EXISTS;
5664                 goto exit_error;
5665         }
5666
5667         /* Allocate a hardware table entry to hold large act. Three actions
5668          * for marker based large action
5669          */
5670         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5671         if (ret)
5672                 goto exit_error;
5673
5674         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5675                 goto exit_error;
5676
5677         /* Update the switch rule to add the marker action */
5678         ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5679         if (!ret) {
5680                 ice_release_lock(rule_lock);
5681                 return ret;
5682         }
5683
5684 exit_error:
5685         ice_release_lock(rule_lock);
5686         /* only remove entry if it did not exist previously */
5687         if (!entry_exists)
5688                 ret = ice_remove_mac(hw, &l_head);
5689
5690         return ret;
5691 }
5692
5693 /**
5694  * ice_add_mac_with_counter - add filter with counter enabled
5695  * @hw: pointer to the hardware structure
5696  * @f_info: pointer to filter info structure containing the MAC filter
5697  *          information
5698  */
5699 enum ice_status
5700 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5701 {
5702         struct ice_fltr_mgmt_list_entry *m_entry;
5703         struct ice_fltr_list_entry fl_info;
5704         struct ice_sw_recipe *recp_list;
5705         struct LIST_HEAD_TYPE l_head;
5706         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5707         enum ice_status ret;
5708         bool entry_exist;
5709         u16 counter_id;
5710         u16 lg_act_id;
5711
5712         if (f_info->fltr_act != ICE_FWD_TO_VSI)
5713                 return ICE_ERR_PARAM;
5714
5715         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5716                 return ICE_ERR_PARAM;
5717
5718         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5719                 return ICE_ERR_PARAM;
5720         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5721         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5722
5723         entry_exist = false;
5724
5725         rule_lock = &recp_list->filt_rule_lock;
5726
5727         /* Add filter if it doesn't exist so then the adding of large
5728          * action always results in update
5729          */
5730         INIT_LIST_HEAD(&l_head);
5731
5732         fl_info.fltr_info = *f_info;
5733         LIST_ADD(&fl_info.list_entry, &l_head);
5734
5735         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5736                                hw->port_info->lport);
5737         if (ret == ICE_ERR_ALREADY_EXISTS)
5738                 entry_exist = true;
5739         else if (ret)
5740                 return ret;
5741
5742         ice_acquire_lock(rule_lock);
5743         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5744         if (!m_entry) {
5745                 ret = ICE_ERR_BAD_PTR;
5746                 goto exit_error;
5747         }
5748
5749         /* Don't enable counter for a filter for which sw marker was enabled */
5750         if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5751                 ret = ICE_ERR_PARAM;
5752                 goto exit_error;
5753         }
5754
5755         /* If a counter was already enabled then don't need to add again */
5756         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5757                 ret = ICE_ERR_ALREADY_EXISTS;
5758                 goto exit_error;
5759         }
5760
5761         /* Allocate a hardware table entry to VLAN counter */
5762         ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5763         if (ret)
5764                 goto exit_error;
5765
5766         /* Allocate a hardware table entry to hold large act. Two actions for
5767          * counter based large action
5768          */
5769         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5770         if (ret)
5771                 goto exit_error;
5772
5773         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5774                 goto exit_error;
5775
5776         /* Update the switch rule to add the counter action */
5777         ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
5778         if (!ret) {
5779                 ice_release_lock(rule_lock);
5780                 return ret;
5781         }
5782
5783 exit_error:
5784         ice_release_lock(rule_lock);
5785         /* only remove entry if it did not exist previously */
5786         if (!entry_exist)
5787                 ret = ice_remove_mac(hw, &l_head);
5788
5789         return ret;
5790 }
5791
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 *
 * NOTE: this table is indexed directly by enum ice_protocol_type (see
 * ice_fill_valid_words), so entries must stay in enum order; the first
 * field of each entry is documentation only.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 0, 2 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
};
5829
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 *
 * NOTE(review): the paragraph above appears to describe recipe-grouping
 * policy and may be stale for this table. ice_prot_id_tbl maps a software
 * enum ice_protocol_type to its hardware protocol ID. It is searched
 * linearly by ice_prot_type_to_id() and also indexed positionally by
 * ice_fill_valid_words(), so entries must stay in enum order.
 */

static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};
5862
5863 /**
5864  * ice_find_recp - find a recipe
5865  * @hw: pointer to the hardware structure
5866  * @lkup_exts: extension sequence to match
5867  *
5868  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
5869  */
5870 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
5871                          enum ice_sw_tunnel_type tun_type)
5872 {
5873         bool refresh_required = true;
5874         struct ice_sw_recipe *recp;
5875         u8 i;
5876
5877         /* Walk through existing recipes to find a match */
5878         recp = hw->switch_info->recp_list;
5879         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5880                 /* If recipe was not created for this ID, in SW bookkeeping,
5881                  * check if FW has an entry for this recipe. If the FW has an
5882                  * entry update it in our SW bookkeeping and continue with the
5883                  * matching.
5884                  */
5885                 if (!recp[i].recp_created)
5886                         if (ice_get_recp_frm_fw(hw,
5887                                                 hw->switch_info->recp_list, i,
5888                                                 &refresh_required))
5889                                 continue;
5890
5891                 /* Skip inverse action recipes */
5892                 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
5893                     ICE_AQ_RECIPE_ACT_INV_ACT)
5894                         continue;
5895
5896                 /* if number of words we are looking for match */
5897                 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
5898                         struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
5899                         struct ice_fv_word *be = lkup_exts->fv_words;
5900                         u16 *cr = recp[i].lkup_exts.field_mask;
5901                         u16 *de = lkup_exts->field_mask;
5902                         bool found = true;
5903                         u8 pe, qr;
5904
5905                         /* ar, cr, and qr are related to the recipe words, while
5906                          * be, de, and pe are related to the lookup words
5907                          */
5908                         for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
5909                                 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
5910                                      qr++) {
5911                                         if (ar[qr].off == be[pe].off &&
5912                                             ar[qr].prot_id == be[pe].prot_id &&
5913                                             cr[qr] == de[pe])
5914                                                 /* Found the "pe"th word in the
5915                                                  * given recipe
5916                                                  */
5917                                                 break;
5918                                 }
5919                                 /* After walking through all the words in the
5920                                  * "i"th recipe if "p"th word was not found then
5921                                  * this recipe is not what we are looking for.
5922                                  * So break out from this loop and try the next
5923                                  * recipe
5924                                  */
5925                                 if (qr >= recp[i].lkup_exts.n_val_words) {
5926                                         found = false;
5927                                         break;
5928                                 }
5929                         }
5930                         /* If for "i"th recipe the found was never set to false
5931                          * then it means we found our match
5932                          */
5933                         if (tun_type == recp[i].tun_type && found)
5934                                 return i; /* Return the recipe ID */
5935                 }
5936         }
5937         return ICE_MAX_NUM_RECIPES;
5938 }
5939
5940 /**
5941  * ice_prot_type_to_id - get protocol ID from protocol type
5942  * @type: protocol type
5943  * @id: pointer to variable that will receive the ID
5944  *
5945  * Returns true if found, false otherwise
5946  */
5947 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
5948 {
5949         u8 i;
5950
5951         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
5952                 if (ice_prot_id_tbl[i].type == type) {
5953                         *id = ice_prot_id_tbl[i].protocol_id;
5954                         return true;
5955                 }
5956         return false;
5957 }
5958
5959 /**
5960  * ice_find_valid_words - count valid words
5961  * @rule: advanced rule with lookup information
5962  * @lkup_exts: byte offset extractions of the words that are valid
5963  *
5964  * calculate valid words in a lookup rule using mask value
5965  */
5966 static u8
5967 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
5968                      struct ice_prot_lkup_ext *lkup_exts)
5969 {
5970         u8 j, word, prot_id, ret_val;
5971
5972         if (!ice_prot_type_to_id(rule->type, &prot_id))
5973                 return 0;
5974
5975         word = lkup_exts->n_val_words;
5976
5977         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
5978                 if (((u16 *)&rule->m_u)[j] &&
5979                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
5980                         /* No more space to accommodate */
5981                         if (word >= ICE_MAX_CHAIN_WORDS)
5982                                 return 0;
5983                         lkup_exts->fv_words[word].off =
5984                                 ice_prot_ext[rule->type].offs[j];
5985                         lkup_exts->fv_words[word].prot_id =
5986                                 ice_prot_id_tbl[rule->type].protocol_id;
5987                         lkup_exts->field_mask[word] =
5988                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
5989                         word++;
5990                 }
5991
5992         ret_val = word - lkup_exts->n_val_words;
5993         lkup_exts->n_val_words = word;
5994
5995         return ret_val;
5996 }
5997
5998 /**
5999  * ice_create_first_fit_recp_def - Create a recipe grouping
6000  * @hw: pointer to the hardware structure
6001  * @lkup_exts: an array of protocol header extractions
6002  * @rg_list: pointer to a list that stores new recipe groups
6003  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6004  *
6005  * Using first fit algorithm, take all the words that are still not done
6006  * and start grouping them in 4-word groups. Each group makes up one
6007  * recipe.
6008  */
6009 static enum ice_status
6010 ice_create_first_fit_recp_def(struct ice_hw *hw,
6011                               struct ice_prot_lkup_ext *lkup_exts,
6012                               struct LIST_HEAD_TYPE *rg_list,
6013                               u8 *recp_cnt)
6014 {
6015         struct ice_pref_recipe_group *grp = NULL;
6016         u8 j;
6017
6018         *recp_cnt = 0;
6019
6020         if (!lkup_exts->n_val_words) {
6021                 struct ice_recp_grp_entry *entry;
6022
6023                 entry = (struct ice_recp_grp_entry *)
6024                         ice_malloc(hw, sizeof(*entry));
6025                 if (!entry)
6026                         return ICE_ERR_NO_MEMORY;
6027                 LIST_ADD(&entry->l_entry, rg_list);
6028                 grp = &entry->r_group;
6029                 (*recp_cnt)++;
6030                 grp->n_val_pairs = 0;
6031         }
6032
6033         /* Walk through every word in the rule to check if it is not done. If so
6034          * then this word needs to be part of a new recipe.
6035          */
6036         for (j = 0; j < lkup_exts->n_val_words; j++)
6037                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6038                         if (!grp ||
6039                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6040                                 struct ice_recp_grp_entry *entry;
6041
6042                                 entry = (struct ice_recp_grp_entry *)
6043                                         ice_malloc(hw, sizeof(*entry));
6044                                 if (!entry)
6045                                         return ICE_ERR_NO_MEMORY;
6046                                 LIST_ADD(&entry->l_entry, rg_list);
6047                                 grp = &entry->r_group;
6048                                 (*recp_cnt)++;
6049                         }
6050
6051                         grp->pairs[grp->n_val_pairs].prot_id =
6052                                 lkup_exts->fv_words[j].prot_id;
6053                         grp->pairs[grp->n_val_pairs].off =
6054                                 lkup_exts->fv_words[j].off;
6055                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6056                         grp->n_val_pairs++;
6057                 }
6058
6059         return ICE_SUCCESS;
6060 }
6061
6062 /**
6063  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6064  * @hw: pointer to the hardware structure
6065  * @fv_list: field vector with the extraction sequence information
6066  * @rg_list: recipe groupings with protocol-offset pairs
6067  *
6068  * Helper function to fill in the field vector indices for protocol-offset
6069  * pairs. These indexes are then ultimately programmed into a recipe.
6070  */
6071 static enum ice_status
6072 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6073                        struct LIST_HEAD_TYPE *rg_list)
6074 {
6075         struct ice_sw_fv_list_entry *fv;
6076         struct ice_recp_grp_entry *rg;
6077         struct ice_fv_word *fv_ext;
6078
6079         if (LIST_EMPTY(fv_list))
6080                 return ICE_SUCCESS;
6081
6082         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6083         fv_ext = fv->fv_ptr->ew;
6084
6085         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6086                 u8 i;
6087
6088                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6089                         struct ice_fv_word *pr;
6090                         bool found = false;
6091                         u16 mask;
6092                         u8 j;
6093
6094                         pr = &rg->r_group.pairs[i];
6095                         mask = rg->r_group.mask[i];
6096
6097                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6098                                 if (fv_ext[j].prot_id == pr->prot_id &&
6099                                     fv_ext[j].off == pr->off) {
6100                                         found = true;
6101
6102                                         /* Store index of field vector */
6103                                         rg->fv_idx[i] = j;
6104                                         rg->fv_mask[i] = mask;
6105                                         break;
6106                                 }
6107
6108                         /* Protocol/offset could not be found, caller gave an
6109                          * invalid pair
6110                          */
6111                         if (!found)
6112                                 return ICE_ERR_PARAM;
6113                 }
6114         }
6115
6116         return ICE_SUCCESS;
6117 }
6118
6119 /**
6120  * ice_find_free_recp_res_idx - find free result indexes for recipe
6121  * @hw: pointer to hardware structure
6122  * @profiles: bitmap of profiles that will be associated with the new recipe
6123  * @free_idx: pointer to variable to receive the free index bitmap
6124  *
6125  * The algorithm used here is:
6126  *      1. When creating a new recipe, create a set P which contains all
6127  *         Profiles that will be associated with our new recipe
6128  *
6129  *      2. For each Profile p in set P:
6130  *          a. Add all recipes associated with Profile p into set R
6131  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6132  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6133  *              i. Or just assume they all have the same possible indexes:
6134  *                      44, 45, 46, 47
6135  *                      i.e., PossibleIndexes = 0x0000F00000000000
6136  *
6137  *      3. For each Recipe r in set R:
6138  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6139  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6140  *
6141  *      FreeIndexes will contain the bits indicating the indexes free for use,
6142  *      then the code needs to update the recipe[r].used_result_idx_bits to
6143  *      indicate which indexes were selected for use by this recipe.
6144  */
6145 static u16
6146 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6147                            ice_bitmap_t *free_idx)
6148 {
6149         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6150         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6151         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6152         u16 bit;
6153
6154         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6155         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6156         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6157         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6158
6159         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6160
6161         /* For each profile we are going to associate the recipe with, add the
6162          * recipes that are associated with that profile. This will give us
6163          * the set of recipes that our recipe may collide with. Also, determine
6164          * what possible result indexes are usable given this set of profiles.
6165          */
6166         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6167                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6168                               ICE_MAX_NUM_RECIPES);
6169                 ice_and_bitmap(possible_idx, possible_idx,
6170                                hw->switch_info->prof_res_bm[bit],
6171                                ICE_MAX_FV_WORDS);
6172         }
6173
6174         /* For each recipe that our new recipe may collide with, determine
6175          * which indexes have been used.
6176          */
6177         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6178                 ice_or_bitmap(used_idx, used_idx,
6179                               hw->switch_info->recp_list[bit].res_idxs,
6180                               ICE_MAX_FV_WORDS);
6181
6182         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6183
6184         /* return number of free indexes */
6185         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6186 }
6187
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates a hardware recipe for every group in rm->rg_list, fills the
 * admin queue recipe buffers, programs them with ice_aq_add_recipe() and
 * mirrors the outcome into the software book keeping
 * (hw->switch_info->recp_list). When more than one group is needed, an
 * extra root recipe is created that chains the others via their result
 * indexes. On success, ownership of the AQ buffer is transferred to
 * rm->root_buf.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  ice_bitmap_t *profiles)
{
	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return ICE_ERR_MAX_LIMIT;

		/* account for the extra chaining (root) recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return ICE_ERR_MAX_LIMIT;

	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* Read an existing recipe from FW; tmp[0] is reused below as the
	 * template that seeds each buf[] element.
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the extracted field vector index/mask pairs for
		 * this group into lookup words 1..n.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				CPU_TO_LE16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = ICE_ERR_MAX_LIMIT;
				goto err_unroll;
			}

			/* Publish this recipe's hit at chain_idx so the root
			 * recipe can match on it; consume the index.
			 */
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(chain_idx, result_idx_bm);
			chain_idx = ice_find_first_bit(result_idx_bm,
						       ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* A single group needs no chaining: it is its own root */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The root matches each chained recipe's published result
		 * index with an all-ones mask.
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	/* Program all assembled recipes in a single AQ call */
	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = ICE_ERR_OUT_OF_RANGE;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* On success buf ownership transfers to rm->root_buf; only tmp is
	 * freed here.
	 */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

err_unroll:
err_mem:
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
6487
6488 /**
6489  * ice_create_recipe_group - creates recipe group
6490  * @hw: pointer to hardware structure
6491  * @rm: recipe management list entry
6492  * @lkup_exts: lookup elements
6493  */
6494 static enum ice_status
6495 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6496                         struct ice_prot_lkup_ext *lkup_exts)
6497 {
6498         enum ice_status status;
6499         u8 recp_count = 0;
6500
6501         rm->n_grp_count = 0;
6502
6503         /* Create recipes for words that are marked not done by packing them
6504          * as best fit.
6505          */
6506         status = ice_create_first_fit_recp_def(hw, lkup_exts,
6507                                                &rm->rg_list, &recp_count);
6508         if (!status) {
6509                 rm->n_grp_count += recp_count;
6510                 rm->n_ext_words = lkup_exts->n_val_words;
6511                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6512                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6513                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6514                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6515         }
6516
6517         return status;
6518 }
6519
6520 /**
6521  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6522  * @hw: pointer to hardware structure
6523  * @lkups: lookup elements or match criteria for the advanced recipe, one
6524  *         structure per protocol header
6525  * @lkups_cnt: number of protocols
6526  * @bm: bitmap of field vectors to consider
6527  * @fv_list: pointer to a list that holds the returned field vectors
6528  */
6529 static enum ice_status
6530 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6531            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6532 {
6533         enum ice_status status;
6534         u8 *prot_ids;
6535         u16 i;
6536
6537         if (!lkups_cnt)
6538                 return ICE_SUCCESS;
6539
6540         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6541         if (!prot_ids)
6542                 return ICE_ERR_NO_MEMORY;
6543
6544         for (i = 0; i < lkups_cnt; i++)
6545                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6546                         status = ICE_ERR_CFG;
6547                         goto free_mem;
6548                 }
6549
6550         /* Find field vectors that include all specified protocol types */
6551         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6552
6553 free_mem:
6554         ice_free(hw, prot_ids);
6555         return status;
6556 }
6557
/**
 * ice_tun_type_match_word - determine if tun type needs a match mask
 * @tun_type: tunnel type
 * @mask: mask to be used for the tunnel
 *
 * Returns true and writes the tunnel-flag match mask through @mask for
 * tunnel types that require matching the tunnel metadata flag word; for
 * VLAN-tagged tunnel variants the VLAN bits are excluded from the mask.
 * For all other types writes 0 and returns false.
 */
static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
{
	switch (tun_type) {
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_NVGRE:
	case ICE_SW_TUN_UDP:
	case ICE_ALL_TUNNELS:
		*mask = ICE_TUN_FLAG_MASK;
		return true;

	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
		return true;

	default:
		*mask = 0;
		return false;
	}
}
6585
6586 /**
6587  * ice_add_special_words - Add words that are not protocols, such as metadata
6588  * @rinfo: other information regarding the rule e.g. priority and action info
6589  * @lkup_exts: lookup word structure
6590  */
6591 static enum ice_status
6592 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6593                       struct ice_prot_lkup_ext *lkup_exts)
6594 {
6595         u16 mask;
6596
6597         /* If this is a tunneled packet, then add recipe index to match the
6598          * tunnel bit in the packet metadata flags.
6599          */
6600         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6601                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6602                         u8 word = lkup_exts->n_val_words++;
6603
6604                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6605                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6606                         lkup_exts->field_mask[word] = mask;
6607                 } else {
6608                         return ICE_ERR_MAX_LIMIT;
6609                 }
6610         }
6611
6612         return ICE_SUCCESS;
6613 }
6614
/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Translates the rule's tunnel type into the set of profiles (field
 * vectors) that may service it. Tunnel types with dedicated profile IDs
 * set those bits directly and return; the remaining types map to a
 * profile-type class that ice_get_sw_fv_bitmap() resolves into @bm.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	/* UDP-based tunnel flavors share the UDP tunnel profile class */
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* The cases below set explicit profile ID bits and return early */
	case ICE_SW_TUN_PPPOE_PAY:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	/* GTP-U inner IP cases enable both EH and non-EH variants for
	 * OTHER/UDP/TCP inner payloads.
	 */
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_AND_NON_TUN:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	/* Resolve the chosen profile-type class into the result bitmap */
	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
6767
6768 /**
6769  * ice_is_prof_rule - determine if rule type is a profile rule
6770  * @type: the rule type
6771  *
6772  * if the rule type is a profile rule, that means that there no field value
6773  * match required, in this case just a profile hit is required.
6774  */
6775 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
6776 {
6777         switch (type) {
6778         case ICE_SW_TUN_PROFID_IPV6_ESP:
6779         case ICE_SW_TUN_PROFID_IPV6_AH:
6780         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6781         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6782         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6783         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6784         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6785         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6786                 return true;
6787         default:
6788                 break;
6789         }
6790
6791         return false;
6792 }
6793
6794 /**
6795  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
6796  * @hw: pointer to hardware structure
6797  * @lkups: lookup elements or match criteria for the advanced recipe, one
6798  *  structure per protocol header
6799  * @lkups_cnt: number of protocols
6800  * @rinfo: other information regarding the rule e.g. priority and action info
6801  * @rid: return the recipe ID of the recipe created
6802  */
6803 static enum ice_status
6804 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6805                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
6806 {
6807         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
6808         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6809         struct ice_prot_lkup_ext *lkup_exts;
6810         struct ice_recp_grp_entry *r_entry;
6811         struct ice_sw_fv_list_entry *fvit;
6812         struct ice_recp_grp_entry *r_tmp;
6813         struct ice_sw_fv_list_entry *tmp;
6814         enum ice_status status = ICE_SUCCESS;
6815         struct ice_sw_recipe *rm;
6816         u8 i;
6817
6818         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
6819                 return ICE_ERR_PARAM;
6820
6821         lkup_exts = (struct ice_prot_lkup_ext *)
6822                 ice_malloc(hw, sizeof(*lkup_exts));
6823         if (!lkup_exts)
6824                 return ICE_ERR_NO_MEMORY;
6825
6826         /* Determine the number of words to be matched and if it exceeds a
6827          * recipe's restrictions
6828          */
6829         for (i = 0; i < lkups_cnt; i++) {
6830                 u16 count;
6831
6832                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
6833                         status = ICE_ERR_CFG;
6834                         goto err_free_lkup_exts;
6835                 }
6836
6837                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
6838                 if (!count) {
6839                         status = ICE_ERR_CFG;
6840                         goto err_free_lkup_exts;
6841                 }
6842         }
6843
6844         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
6845         if (!rm) {
6846                 status = ICE_ERR_NO_MEMORY;
6847                 goto err_free_lkup_exts;
6848         }
6849
6850         /* Get field vectors that contain fields extracted from all the protocol
6851          * headers being programmed.
6852          */
6853         INIT_LIST_HEAD(&rm->fv_list);
6854         INIT_LIST_HEAD(&rm->rg_list);
6855
6856         /* Get bitmap of field vectors (profiles) that are compatible with the
6857          * rule request; only these will be searched in the subsequent call to
6858          * ice_get_fv.
6859          */
6860         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
6861
6862         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
6863         if (status)
6864                 goto err_unroll;
6865
6866         /* Create any special protocol/offset pairs, such as looking at tunnel
6867          * bits by extracting metadata
6868          */
6869         status = ice_add_special_words(rinfo, lkup_exts);
6870         if (status)
6871                 goto err_free_lkup_exts;
6872
6873         /* Group match words into recipes using preferred recipe grouping
6874          * criteria.
6875          */
6876         status = ice_create_recipe_group(hw, rm, lkup_exts);
6877         if (status)
6878                 goto err_unroll;
6879
6880         /* set the recipe priority if specified */
6881         rm->priority = (u8)rinfo->priority;
6882
6883         /* Find offsets from the field vector. Pick the first one for all the
6884          * recipes.
6885          */
6886         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
6887         if (status)
6888                 goto err_unroll;
6889
6890         /* An empty FV list means to use all the profiles returned in the
6891          * profile bitmap
6892          */
6893         if (LIST_EMPTY(&rm->fv_list)) {
6894                 u16 j;
6895
6896                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
6897                         struct ice_sw_fv_list_entry *fvl;
6898
6899                         fvl = (struct ice_sw_fv_list_entry *)
6900                                 ice_malloc(hw, sizeof(*fvl));
6901                         if (!fvl)
6902                                 goto err_unroll;
6903                         fvl->fv_ptr = NULL;
6904                         fvl->profile_id = j;
6905                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
6906                 }
6907         }
6908
6909         /* get bitmap of all profiles the recipe will be associated with */
6910         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
6911         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6912                             list_entry) {
6913                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
6914                 ice_set_bit((u16)fvit->profile_id, profiles);
6915         }
6916
6917         /* Look for a recipe which matches our requested fv / mask list */
6918         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
6919         if (*rid < ICE_MAX_NUM_RECIPES)
6920                 /* Success if found a recipe that match the existing criteria */
6921                 goto err_unroll;
6922
6923         rm->tun_type = rinfo->tun_type;
6924         /* Recipe we need does not exist, add a recipe */
6925         status = ice_add_sw_recipe(hw, rm, profiles);
6926         if (status)
6927                 goto err_unroll;
6928
6929         /* Associate all the recipes created with all the profiles in the
6930          * common field vector.
6931          */
6932         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
6933                             list_entry) {
6934                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
6935                 u16 j;
6936
6937                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
6938                                                       (u8 *)r_bitmap, NULL);
6939                 if (status)
6940                         goto err_unroll;
6941
6942                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
6943                               ICE_MAX_NUM_RECIPES);
6944                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6945                 if (status)
6946                         goto err_unroll;
6947
6948                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
6949                                                       (u8 *)r_bitmap,
6950                                                       NULL);
6951                 ice_release_change_lock(hw);
6952
6953                 if (status)
6954                         goto err_unroll;
6955
6956                 /* Update profile to recipe bitmap array */
6957                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
6958                               ICE_MAX_NUM_RECIPES);
6959
6960                 /* Update recipe to profile bitmap array */
6961                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
6962                         ice_set_bit((u16)fvit->profile_id,
6963                                     recipe_to_profile[j]);
6964         }
6965
6966         *rid = rm->root_rid;
6967         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
6968                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
6969 err_unroll:
6970         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
6971                                  ice_recp_grp_entry, l_entry) {
6972                 LIST_DEL(&r_entry->l_entry);
6973                 ice_free(hw, r_entry);
6974         }
6975
6976         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
6977                                  list_entry) {
6978                 LIST_DEL(&fvit->list_entry);
6979                 ice_free(hw, fvit);
6980         }
6981
6982         if (rm->root_buf)
6983                 ice_free(hw, rm->root_buf);
6984
6985         ice_free(hw, rm);
6986
6987 err_free_lkup_exts:
6988         ice_free(hw, lkup_exts);
6989
6990         return status;
6991 }
6992
6993 /**
6994  * ice_find_dummy_packet - find dummy packet by tunnel type
6995  *
6996  * @lkups: lookup elements or match criteria for the advanced recipe, one
6997  *         structure per protocol header
6998  * @lkups_cnt: number of protocols
6999  * @tun_type: tunnel type from the match criteria
7000  * @pkt: dummy packet to fill according to filter match criteria
7001  * @pkt_len: packet length of dummy packet
7002  * @offsets: pointer to receive the pointer to the offsets for the packet
7003  */
7004 static void
7005 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7006                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7007                       u16 *pkt_len,
7008                       const struct ice_dummy_pkt_offsets **offsets)
7009 {
7010         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7011         bool gre = false;
7012         u16 i;
7013
7014         for (i = 0; i < lkups_cnt; i++) {
7015                 if (lkups[i].type == ICE_UDP_ILOS)
7016                         udp = true;
7017                 else if (lkups[i].type == ICE_TCP_IL)
7018                         tcp = true;
7019                 else if (lkups[i].type == ICE_IPV6_OFOS)
7020                         ipv6 = true;
7021                 else if (lkups[i].type == ICE_VLAN_OFOS)
7022                         vlan = true;
7023                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7024                          lkups[i].h_u.ipv4_hdr.protocol ==
7025                                 ICE_IPV4_NVGRE_PROTO_ID &&
7026                          lkups[i].m_u.ipv4_hdr.protocol ==
7027                                 0xFF)
7028                         gre = true;
7029                 else if (lkups[i].type == ICE_PPPOE &&
7030                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7031                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7032                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7033                                 0xFFFF)
7034                         ipv6 = true;
7035                 else if (lkups[i].type == ICE_ETYPE_OL &&
7036                          lkups[i].h_u.ethertype.ethtype_id ==
7037                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7038                          lkups[i].m_u.ethertype.ethtype_id ==
7039                                         0xFFFF)
7040                         ipv6 = true;
7041                 else if (lkups[i].type == ICE_IPV4_IL &&
7042                          lkups[i].h_u.ipv4_hdr.protocol ==
7043                                 ICE_TCP_PROTO_ID &&
7044                          lkups[i].m_u.ipv4_hdr.protocol ==
7045                                 0xFF)
7046                         tcp = true;
7047         }
7048
7049         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7050                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7051                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7052                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7053                 return;
7054         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7055                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7056                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7057                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7058                 return;
7059         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7060                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7061                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7062                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7063                 return;
7064         } else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7065                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
7066                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7067                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7068                 return;
7069         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7070                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
7071                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7072                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7073                 return;
7074         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7075                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
7076                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7077                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7078                 return;
7079         }
7080
7081         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7082                 *pkt = dummy_ipv4_esp_pkt;
7083                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7084                 *offsets = dummy_ipv4_esp_packet_offsets;
7085                 return;
7086         }
7087
7088         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7089                 *pkt = dummy_ipv6_esp_pkt;
7090                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7091                 *offsets = dummy_ipv6_esp_packet_offsets;
7092                 return;
7093         }
7094
7095         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7096                 *pkt = dummy_ipv4_ah_pkt;
7097                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7098                 *offsets = dummy_ipv4_ah_packet_offsets;
7099                 return;
7100         }
7101
7102         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7103                 *pkt = dummy_ipv6_ah_pkt;
7104                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7105                 *offsets = dummy_ipv6_ah_packet_offsets;
7106                 return;
7107         }
7108
7109         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7110                 *pkt = dummy_ipv4_nat_pkt;
7111                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7112                 *offsets = dummy_ipv4_nat_packet_offsets;
7113                 return;
7114         }
7115
7116         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7117                 *pkt = dummy_ipv6_nat_pkt;
7118                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
7119                 *offsets = dummy_ipv6_nat_packet_offsets;
7120                 return;
7121         }
7122
7123         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7124                 *pkt = dummy_ipv4_l2tpv3_pkt;
7125                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7126                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
7127                 return;
7128         }
7129
7130         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7131                 *pkt = dummy_ipv6_l2tpv3_pkt;
7132                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7133                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
7134                 return;
7135         }
7136
7137         if (tun_type == ICE_SW_TUN_GTP) {
7138                 *pkt = dummy_udp_gtp_packet;
7139                 *pkt_len = sizeof(dummy_udp_gtp_packet);
7140                 *offsets = dummy_udp_gtp_packet_offsets;
7141                 return;
7142         }
7143
7144         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7145                 *pkt = dummy_pppoe_ipv6_packet;
7146                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7147                 *offsets = dummy_pppoe_packet_offsets;
7148                 return;
7149         } else if (tun_type == ICE_SW_TUN_PPPOE ||
7150                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
7151                 *pkt = dummy_pppoe_ipv4_packet;
7152                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7153                 *offsets = dummy_pppoe_packet_offsets;
7154                 return;
7155         }
7156
7157         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7158                 *pkt = dummy_pppoe_ipv4_packet;
7159                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7160                 *offsets = dummy_pppoe_packet_ipv4_offsets;
7161                 return;
7162         }
7163
7164         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7165                 *pkt = dummy_pppoe_ipv4_tcp_packet;
7166                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7167                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7168                 return;
7169         }
7170
7171         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7172                 *pkt = dummy_pppoe_ipv4_udp_packet;
7173                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7174                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7175                 return;
7176         }
7177
7178         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7179                 *pkt = dummy_pppoe_ipv6_packet;
7180                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7181                 *offsets = dummy_pppoe_packet_ipv6_offsets;
7182                 return;
7183         }
7184
7185         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7186                 *pkt = dummy_pppoe_ipv6_tcp_packet;
7187                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7188                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7189                 return;
7190         }
7191
7192         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7193                 *pkt = dummy_pppoe_ipv6_udp_packet;
7194                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7195                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7196                 return;
7197         }
7198
7199         if (tun_type == ICE_SW_IPV4_TCP) {
7200                 *pkt = dummy_tcp_packet;
7201                 *pkt_len = sizeof(dummy_tcp_packet);
7202                 *offsets = dummy_tcp_packet_offsets;
7203                 return;
7204         }
7205
7206         if (tun_type == ICE_SW_IPV4_UDP) {
7207                 *pkt = dummy_udp_packet;
7208                 *pkt_len = sizeof(dummy_udp_packet);
7209                 *offsets = dummy_udp_packet_offsets;
7210                 return;
7211         }
7212
7213         if (tun_type == ICE_SW_IPV6_TCP) {
7214                 *pkt = dummy_tcp_ipv6_packet;
7215                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7216                 *offsets = dummy_tcp_ipv6_packet_offsets;
7217                 return;
7218         }
7219
7220         if (tun_type == ICE_SW_IPV6_UDP) {
7221                 *pkt = dummy_udp_ipv6_packet;
7222                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7223                 *offsets = dummy_udp_ipv6_packet_offsets;
7224                 return;
7225         }
7226
7227         if (tun_type == ICE_ALL_TUNNELS) {
7228                 *pkt = dummy_gre_udp_packet;
7229                 *pkt_len = sizeof(dummy_gre_udp_packet);
7230                 *offsets = dummy_gre_udp_packet_offsets;
7231                 return;
7232         }
7233
7234         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7235                 if (tcp) {
7236                         *pkt = dummy_gre_tcp_packet;
7237                         *pkt_len = sizeof(dummy_gre_tcp_packet);
7238                         *offsets = dummy_gre_tcp_packet_offsets;
7239                         return;
7240                 }
7241
7242                 *pkt = dummy_gre_udp_packet;
7243                 *pkt_len = sizeof(dummy_gre_udp_packet);
7244                 *offsets = dummy_gre_udp_packet_offsets;
7245                 return;
7246         }
7247
7248         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7249             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7250             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7251             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7252                 if (tcp) {
7253                         *pkt = dummy_udp_tun_tcp_packet;
7254                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7255                         *offsets = dummy_udp_tun_tcp_packet_offsets;
7256                         return;
7257                 }
7258
7259                 *pkt = dummy_udp_tun_udp_packet;
7260                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
7261                 *offsets = dummy_udp_tun_udp_packet_offsets;
7262                 return;
7263         }
7264
7265         if (udp && !ipv6) {
7266                 if (vlan) {
7267                         *pkt = dummy_vlan_udp_packet;
7268                         *pkt_len = sizeof(dummy_vlan_udp_packet);
7269                         *offsets = dummy_vlan_udp_packet_offsets;
7270                         return;
7271                 }
7272                 *pkt = dummy_udp_packet;
7273                 *pkt_len = sizeof(dummy_udp_packet);
7274                 *offsets = dummy_udp_packet_offsets;
7275                 return;
7276         } else if (udp && ipv6) {
7277                 if (vlan) {
7278                         *pkt = dummy_vlan_udp_ipv6_packet;
7279                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7280                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
7281                         return;
7282                 }
7283                 *pkt = dummy_udp_ipv6_packet;
7284                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
7285                 *offsets = dummy_udp_ipv6_packet_offsets;
7286                 return;
7287         } else if ((tcp && ipv6) || ipv6) {
7288                 if (vlan) {
7289                         *pkt = dummy_vlan_tcp_ipv6_packet;
7290                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7291                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7292                         return;
7293                 }
7294                 *pkt = dummy_tcp_ipv6_packet;
7295                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
7296                 *offsets = dummy_tcp_ipv6_packet_offsets;
7297                 return;
7298         }
7299
7300         if (vlan) {
7301                 *pkt = dummy_vlan_tcp_packet;
7302                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
7303                 *offsets = dummy_vlan_tcp_packet_offsets;
7304         } else {
7305                 *pkt = dummy_tcp_packet;
7306                 *pkt_len = sizeof(dummy_tcp_packet);
7307                 *offsets = dummy_tcp_packet_offsets;
7308         }
7309 }
7310
7311 /**
7312  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7313  *
7314  * @lkups: lookup elements or match criteria for the advanced recipe, one
7315  *         structure per protocol header
7316  * @lkups_cnt: number of protocols
7317  * @s_rule: stores rule information from the match criteria
7318  * @dummy_pkt: dummy packet to fill according to filter match criteria
7319  * @pkt_len: packet length of dummy packet
7320  * @offsets: offset info for the dummy packet
7321  */
7322 static enum ice_status
7323 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7324                           struct ice_aqc_sw_rules_elem *s_rule,
7325                           const u8 *dummy_pkt, u16 pkt_len,
7326                           const struct ice_dummy_pkt_offsets *offsets)
7327 {
7328         u8 *pkt;
7329         u16 i;
7330
7331         /* Start with a packet with a pre-defined/dummy content. Then, fill
7332          * in the header values to be looked up or matched.
7333          */
7334         pkt = s_rule->pdata.lkup_tx_rx.hdr;
7335
7336         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7337
7338         for (i = 0; i < lkups_cnt; i++) {
7339                 enum ice_protocol_type type;
7340                 u16 offset = 0, len = 0, j;
7341                 bool found = false;
7342
7343                 /* find the start of this layer; it should be found since this
7344                  * was already checked when search for the dummy packet
7345                  */
7346                 type = lkups[i].type;
7347                 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7348                         if (type == offsets[j].type) {
7349                                 offset = offsets[j].offset;
7350                                 found = true;
7351                                 break;
7352                         }
7353                 }
7354                 /* this should never happen in a correct calling sequence */
7355                 if (!found)
7356                         return ICE_ERR_PARAM;
7357
7358                 switch (lkups[i].type) {
7359                 case ICE_MAC_OFOS:
7360                 case ICE_MAC_IL:
7361                         len = sizeof(struct ice_ether_hdr);
7362                         break;
7363                 case ICE_ETYPE_OL:
7364                         len = sizeof(struct ice_ethtype_hdr);
7365                         break;
7366                 case ICE_VLAN_OFOS:
7367                         len = sizeof(struct ice_vlan_hdr);
7368                         break;
7369                 case ICE_IPV4_OFOS:
7370                 case ICE_IPV4_IL:
7371                         len = sizeof(struct ice_ipv4_hdr);
7372                         break;
7373                 case ICE_IPV6_OFOS:
7374                 case ICE_IPV6_IL:
7375                         len = sizeof(struct ice_ipv6_hdr);
7376                         break;
7377                 case ICE_TCP_IL:
7378                 case ICE_UDP_OF:
7379                 case ICE_UDP_ILOS:
7380                         len = sizeof(struct ice_l4_hdr);
7381                         break;
7382                 case ICE_SCTP_IL:
7383                         len = sizeof(struct ice_sctp_hdr);
7384                         break;
7385                 case ICE_NVGRE:
7386                         len = sizeof(struct ice_nvgre);
7387                         break;
7388                 case ICE_VXLAN:
7389                 case ICE_GENEVE:
7390                 case ICE_VXLAN_GPE:
7391                         len = sizeof(struct ice_udp_tnl_hdr);
7392                         break;
7393
7394                 case ICE_GTP:
7395                 case ICE_GTP_NO_PAY:
7396                         len = sizeof(struct ice_udp_gtp_hdr);
7397                         break;
7398                 case ICE_PPPOE:
7399                         len = sizeof(struct ice_pppoe_hdr);
7400                         break;
7401                 case ICE_ESP:
7402                         len = sizeof(struct ice_esp_hdr);
7403                         break;
7404                 case ICE_NAT_T:
7405                         len = sizeof(struct ice_nat_t_hdr);
7406                         break;
7407                 case ICE_AH:
7408                         len = sizeof(struct ice_ah_hdr);
7409                         break;
7410                 case ICE_L2TPV3:
7411                         len = sizeof(struct ice_l2tpv3_sess_hdr);
7412                         break;
7413                 default:
7414                         return ICE_ERR_PARAM;
7415                 }
7416
7417                 /* the length should be a word multiple */
7418                 if (len % ICE_BYTES_PER_WORD)
7419                         return ICE_ERR_CFG;
7420
7421                 /* We have the offset to the header start, the length, the
7422                  * caller's header values and mask. Use this information to
7423                  * copy the data into the dummy packet appropriately based on
7424                  * the mask. Note that we need to only write the bits as
7425                  * indicated by the mask to make sure we don't improperly write
7426                  * over any significant packet data.
7427                  */
7428                 for (j = 0; j < len / sizeof(u16); j++)
7429                         if (((u16 *)&lkups[i].m_u)[j])
7430                                 ((u16 *)(pkt + offset))[j] =
7431                                         (((u16 *)(pkt + offset))[j] &
7432                                          ~((u16 *)&lkups[i].m_u)[j]) |
7433                                         (((u16 *)&lkups[i].h_u)[j] &
7434                                          ((u16 *)&lkups[i].m_u)[j]);
7435         }
7436
7437         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7438
7439         return ICE_SUCCESS;
7440 }
7441
7442 /**
7443  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7444  * @hw: pointer to the hardware structure
7445  * @tun_type: tunnel type
7446  * @pkt: dummy packet to fill in
7447  * @offsets: offset info for the dummy packet
7448  */
7449 static enum ice_status
7450 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7451                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7452 {
7453         u16 open_port, i;
7454
7455         switch (tun_type) {
7456         case ICE_SW_TUN_AND_NON_TUN:
7457         case ICE_SW_TUN_VXLAN_GPE:
7458         case ICE_SW_TUN_VXLAN:
7459         case ICE_SW_TUN_VXLAN_VLAN:
7460         case ICE_SW_TUN_UDP:
7461                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7462                         return ICE_ERR_CFG;
7463                 break;
7464
7465         case ICE_SW_TUN_GENEVE:
7466         case ICE_SW_TUN_GENEVE_VLAN:
7467                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7468                         return ICE_ERR_CFG;
7469                 break;
7470
7471         default:
7472                 /* Nothing needs to be done for this tunnel type */
7473                 return ICE_SUCCESS;
7474         }
7475
7476         /* Find the outer UDP protocol header and insert the port number */
7477         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7478                 if (offsets[i].type == ICE_UDP_OF) {
7479                         struct ice_l4_hdr *hdr;
7480                         u16 offset;
7481
7482                         offset = offsets[i].offset;
7483                         hdr = (struct ice_l4_hdr *)&pkt[offset];
7484                         hdr->dst_port = CPU_TO_BE16(open_port);
7485
7486                         return ICE_SUCCESS;
7487                 }
7488         }
7489
7490         return ICE_ERR_CFG;
7491 }
7492
7493 /**
7494  * ice_find_adv_rule_entry - Search a rule entry
7495  * @hw: pointer to the hardware structure
7496  * @lkups: lookup elements or match criteria for the advanced recipe, one
7497  *         structure per protocol header
7498  * @lkups_cnt: number of protocols
7499  * @recp_id: recipe ID for which we are finding the rule
7500  * @rinfo: other information regarding the rule e.g. priority and action info
7501  *
7502  * Helper function to search for a given advance rule entry
7503  * Returns pointer to entry storing the rule if found
7504  */
7505 static struct ice_adv_fltr_mgmt_list_entry *
7506 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7507                         u16 lkups_cnt, u16 recp_id,
7508                         struct ice_adv_rule_info *rinfo)
7509 {
7510         struct ice_adv_fltr_mgmt_list_entry *list_itr;
7511         struct ice_switch_info *sw = hw->switch_info;
7512         int i;
7513
7514         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7515                             ice_adv_fltr_mgmt_list_entry, list_entry) {
7516                 bool lkups_matched = true;
7517
7518                 if (lkups_cnt != list_itr->lkups_cnt)
7519                         continue;
7520                 for (i = 0; i < list_itr->lkups_cnt; i++)
7521                         if (memcmp(&list_itr->lkups[i], &lkups[i],
7522                                    sizeof(*lkups))) {
7523                                 lkups_matched = false;
7524                                 break;
7525                         }
7526                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7527                     rinfo->tun_type == list_itr->rule_info.tun_type &&
7528                     lkups_matched)
7529                         return list_itr;
7530         }
7531         return NULL;
7532 }
7533
7534 /**
7535  * ice_adv_add_update_vsi_list
7536  * @hw: pointer to the hardware structure
7537  * @m_entry: pointer to current adv filter management list entry
7538  * @cur_fltr: filter information from the book keeping entry
7539  * @new_fltr: filter information with the new VSI to be added
7540  *
7541  * Call AQ command to add or update previously created VSI list with new VSI.
7542  *
7543  * Helper function to do book keeping associated with adding filter information
7544  * The algorithm to do the booking keeping is described below :
7545  * When a VSI needs to subscribe to a given advanced filter
7546  *      if only one VSI has been added till now
7547  *              Allocate a new VSI list and add two VSIs
7548  *              to this list using switch rule command
7549  *              Update the previously created switch rule with the
7550  *              newly created VSI list ID
7551  *      if a VSI list was previously created
7552  *              Add the new VSI to the previously created VSI list set
7553  *              using the update switch rule command
7554  */
7555 static enum ice_status
7556 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7557                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
7558                             struct ice_adv_rule_info *cur_fltr,
7559                             struct ice_adv_rule_info *new_fltr)
7560 {
7561         enum ice_status status;
7562         u16 vsi_list_id = 0;
7563
7564         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7565             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7566             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7567                 return ICE_ERR_NOT_IMPL;
7568
7569         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7570              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7571             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7572              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7573                 return ICE_ERR_NOT_IMPL;
7574
7575         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7576                  /* Only one entry existed in the mapping and it was not already
7577                   * a part of a VSI list. So, create a VSI list with the old and
7578                   * new VSIs.
7579                   */
7580                 struct ice_fltr_info tmp_fltr;
7581                 u16 vsi_handle_arr[2];
7582
7583                 /* A rule already exists with the new VSI being added */
7584                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7585                     new_fltr->sw_act.fwd_id.hw_vsi_id)
7586                         return ICE_ERR_ALREADY_EXISTS;
7587
7588                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7589                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7590                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7591                                                   &vsi_list_id,
7592                                                   ICE_SW_LKUP_LAST);
7593                 if (status)
7594                         return status;
7595
7596                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7597                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7598                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7599                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7600                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7601                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7602
7603                 /* Update the previous switch rule of "forward to VSI" to
7604                  * "fwd to VSI list"
7605                  */
7606                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7607                 if (status)
7608                         return status;
7609
7610                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7611                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7612                 m_entry->vsi_list_info =
7613                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7614                                                 vsi_list_id);
7615         } else {
7616                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7617
7618                 if (!m_entry->vsi_list_info)
7619                         return ICE_ERR_CFG;
7620
7621                 /* A rule already exists with the new VSI being added */
7622                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7623                         return ICE_SUCCESS;
7624
7625                 /* Update the previously created VSI list set with
7626                  * the new VSI ID passed in
7627                  */
7628                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7629
7630                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7631                                                   vsi_list_id, false,
7632                                                   ice_aqc_opc_update_sw_rules,
7633                                                   ICE_SW_LKUP_LAST);
7634                 /* update VSI list mapping info with new VSI ID */
7635                 if (!status)
7636                         ice_set_bit(vsi_handle,
7637                                     m_entry->vsi_list_info->vsi_map);
7638         }
7639         if (!status)
7640                 m_entry->vsi_count++;
7641         return status;
7642 }
7643
7644 /**
7645  * ice_add_adv_rule - helper function to create an advanced switch rule
7646  * @hw: pointer to the hardware structure
7647  * @lkups: information on the words that needs to be looked up. All words
7648  * together makes one recipe
7649  * @lkups_cnt: num of entries in the lkups array
7650  * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 *               ignored in case of error.
7653  *
7654  * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
7656  * These words can span multiple protocols. Callers to this function need to
7657  * pass in a list of protocol headers with lookup information along and mask
7658  * that determines which words are valid from the given protocol header.
7659  * rinfo describes other information related to this rule such as forwarding
7660  * IDs, priority of this rule, etc.
7661  */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	/* Profile rules are keyed by tunnel type and may legitimately carry
	 * zero lookups; all other rules must have at least one.
	 */
	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* Count each non-zero 16-bit word of the lookup mask; this is
		 * the number of extraction words the recipe will consume.
		 */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	/* Enforce the hardware limit on chained extraction words */
	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* Only these four forwarding actions are supported for adv rules */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the driver VSI handle to the HW VSI number the switch
	 * rule actually encodes.
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Create (or look up) the recipe that matches this set of lookups */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* No existing rule: build a new switch rule carrying the dummy
	 * packet (header + training payload) after the fixed rule header.
	 */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* Translate the generic filter action into the single-action bits
	 * understood by the switch rule Admin Queue command.
	 */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Splice the caller's lookup values into the dummy packet */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	/* For tunneled rules, patch the outer headers for the tunnel type */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* Program the rule into hardware via the Admin Queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* Keep a private copy of the lookups for later rule matching; a
	 * profile rule with zero lookups legitimately duplicates nothing.
	 */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* The firmware-assigned rule index is the handle used for removal */
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* On failure, free the partially built management entry; the
	 * temporary AQ buffer is always released.
	 */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	ice_free(hw, s_rule);

	return status;
}
7871
7872 /**
7873  * ice_adv_rem_update_vsi_list
7874  * @hw: pointer to the hardware structure
7875  * @vsi_handle: VSI handle of the VSI to remove
7876  * @fm_list: filter management entry for which the VSI list management needs to
7877  *           be done
7878  */
7879 static enum ice_status
7880 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
7881                             struct ice_adv_fltr_mgmt_list_entry *fm_list)
7882 {
7883         struct ice_vsi_list_map_info *vsi_list_info;
7884         enum ice_sw_lkup_type lkup_type;
7885         enum ice_status status;
7886         u16 vsi_list_id;
7887
7888         if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
7889             fm_list->vsi_count == 0)
7890                 return ICE_ERR_PARAM;
7891
7892         /* A rule with the VSI being removed does not exist */
7893         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
7894                 return ICE_ERR_DOES_NOT_EXIST;
7895
7896         lkup_type = ICE_SW_LKUP_LAST;
7897         vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
7898         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
7899                                           ice_aqc_opc_update_sw_rules,
7900                                           lkup_type);
7901         if (status)
7902                 return status;
7903
7904         fm_list->vsi_count--;
7905         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
7906         vsi_list_info = fm_list->vsi_list_info;
7907         if (fm_list->vsi_count == 1) {
7908                 struct ice_fltr_info tmp_fltr;
7909                 u16 rem_vsi_handle;
7910
7911                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
7912                                                     ICE_MAX_VSI);
7913                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
7914                         return ICE_ERR_OUT_OF_RANGE;
7915
7916                 /* Make sure VSI list is empty before removing it below */
7917                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
7918                                                   vsi_list_id, true,
7919                                                   ice_aqc_opc_update_sw_rules,
7920                                                   lkup_type);
7921                 if (status)
7922                         return status;
7923
7924                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7925                 tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
7926                 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
7927                 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
7928                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
7929                 tmp_fltr.fwd_id.hw_vsi_id =
7930                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
7931                 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
7932                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
7933                 fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
7934
7935                 /* Update the previous switch rule of "MAC forward to VSI" to
7936                  * "MAC fwd to VSI list"
7937                  */
7938                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7939                 if (status) {
7940                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
7941                                   tmp_fltr.fwd_id.hw_vsi_id, status);
7942                         return status;
7943                 }
7944                 fm_list->vsi_list_info->ref_cnt--;
7945
7946                 /* Remove the VSI list since it is no longer used */
7947                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
7948                 if (status) {
7949                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
7950                                   vsi_list_id, status);
7951                         return status;
7952                 }
7953
7954                 LIST_DEL(&vsi_list_info->list_entry);
7955                 ice_free(hw, vsi_list_info);
7956                 fm_list->vsi_list_info = NULL;
7957         }
7958
7959         return status;
7960 }
7961
7962 /**
7963  * ice_rem_adv_rule - removes existing advanced switch rule
7964  * @hw: pointer to the hardware structure
7965  * @lkups: information on the words that needs to be looked up. All words
7966  *         together makes one recipe
7967  * @lkups_cnt: num of entries in the lkups array
7968  * @rinfo: Its the pointer to the rule information for the rule
7969  *
7970  * This function can be used to remove 1 rule at a time. The lkups is
7971  * used to describe all the words that forms the "lookup" portion of the
7972  * rule. These words can span multiple protocols. Callers to this function
7973  * need to pass in a list of protocol headers with lookup information along
7974  * and mask that determines which words are valid from the given protocol
7975  * header. rinfo describes other information related to this rule such as
7976  * forwarding IDs, priority of this rule, etc.
7977  */
7978 enum ice_status
7979 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7980                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
7981 {
7982         struct ice_adv_fltr_mgmt_list_entry *list_elem;
7983         struct ice_prot_lkup_ext lkup_exts;
7984         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
7985         enum ice_status status = ICE_SUCCESS;
7986         bool remove_rule = false;
7987         u16 i, rid, vsi_handle;
7988
7989         ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
7990         for (i = 0; i < lkups_cnt; i++) {
7991                 u16 count;
7992
7993                 if (lkups[i].type >= ICE_PROTOCOL_LAST)
7994                         return ICE_ERR_CFG;
7995
7996                 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
7997                 if (!count)
7998                         return ICE_ERR_CFG;
7999         }
8000
8001         /* Create any special protocol/offset pairs, such as looking at tunnel
8002          * bits by extracting metadata
8003          */
8004         status = ice_add_special_words(rinfo, &lkup_exts);
8005         if (status)
8006                 return status;
8007
8008         rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8009         /* If did not find a recipe that match the existing criteria */
8010         if (rid == ICE_MAX_NUM_RECIPES)
8011                 return ICE_ERR_PARAM;
8012
8013         rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8014         list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8015         /* the rule is already removed */
8016         if (!list_elem)
8017                 return ICE_SUCCESS;
8018         ice_acquire_lock(rule_lock);
8019         if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8020                 remove_rule = true;
8021         } else if (list_elem->vsi_count > 1) {
8022                 remove_rule = false;
8023                 vsi_handle = rinfo->sw_act.vsi_handle;
8024                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8025         } else {
8026                 vsi_handle = rinfo->sw_act.vsi_handle;
8027                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8028                 if (status) {
8029                         ice_release_lock(rule_lock);
8030                         return status;
8031                 }
8032                 if (list_elem->vsi_count == 0)
8033                         remove_rule = true;
8034         }
8035         ice_release_lock(rule_lock);
8036         if (remove_rule) {
8037                 struct ice_aqc_sw_rules_elem *s_rule;
8038                 u16 rule_buf_sz;
8039
8040                 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8041                 s_rule =
8042                         (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
8043                                                                    rule_buf_sz);
8044                 if (!s_rule)
8045                         return ICE_ERR_NO_MEMORY;
8046                 s_rule->pdata.lkup_tx_rx.act = 0;
8047                 s_rule->pdata.lkup_tx_rx.index =
8048                         CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8049                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8050                 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8051                                          rule_buf_sz, 1,
8052                                          ice_aqc_opc_remove_sw_rules, NULL);
8053                 if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8054                         struct ice_switch_info *sw = hw->switch_info;
8055
8056                         ice_acquire_lock(rule_lock);
8057                         LIST_DEL(&list_elem->list_entry);
8058                         ice_free(hw, list_elem->lkups);
8059                         ice_free(hw, list_elem);
8060                         ice_release_lock(rule_lock);
8061                         if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8062                                 sw->recp_list[rid].adv_rule = false;
8063                 }
8064                 ice_free(hw, s_rule);
8065         }
8066         return status;
8067 }
8068
8069 /**
8070  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8071  * @hw: pointer to the hardware structure
8072  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8073  *
8074  * This function is used to remove 1 rule at a time. The removal is based on
8075  * the remove_entry parameter. This function will remove rule for a given
8076  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8077  */
8078 enum ice_status
8079 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8080                        struct ice_rule_query_data *remove_entry)
8081 {
8082         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8083         struct LIST_HEAD_TYPE *list_head;
8084         struct ice_adv_rule_info rinfo;
8085         struct ice_switch_info *sw;
8086
8087         sw = hw->switch_info;
8088         if (!sw->recp_list[remove_entry->rid].recp_created)
8089                 return ICE_ERR_PARAM;
8090         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8091         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8092                             list_entry) {
8093                 if (list_itr->rule_info.fltr_rule_id ==
8094                     remove_entry->rule_id) {
8095                         rinfo = list_itr->rule_info;
8096                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8097                         return ice_rem_adv_rule(hw, list_itr->lkups,
8098                                                 list_itr->lkups_cnt, &rinfo);
8099                 }
8100         }
8101         /* either list is empty or unable to find rule */
8102         return ICE_ERR_DOES_NOT_EXIST;
8103 }
8104
8105 /**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 *                            given VSI handle
8108  * @hw: pointer to the hardware structure
8109  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8110  *
8111  * This function is used to remove all the rules for a given VSI and as soon
8112  * as removing a rule fails, it will return immediately with the error code,
8113  * else it will return ICE_SUCCESS
8114  */
8115 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8116 {
8117         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8118         struct ice_vsi_list_map_info *map_info;
8119         struct LIST_HEAD_TYPE *list_head;
8120         struct ice_adv_rule_info rinfo;
8121         struct ice_switch_info *sw;
8122         enum ice_status status;
8123         u16 vsi_list_id = 0;
8124         u8 rid;
8125
8126         sw = hw->switch_info;
8127         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8128                 if (!sw->recp_list[rid].recp_created)
8129                         continue;
8130                 if (!sw->recp_list[rid].adv_rule)
8131                         continue;
8132                 list_head = &sw->recp_list[rid].filt_rules;
8133                 map_info = NULL;
8134                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
8135                                     ice_adv_fltr_mgmt_list_entry, list_entry) {
8136                         map_info = ice_find_vsi_list_entry(&sw->recp_list[rid],
8137                                                            vsi_handle,
8138                                                            &vsi_list_id);
8139                         if (!map_info)
8140                                 continue;
8141                         rinfo = list_itr->rule_info;
8142                         rinfo.sw_act.vsi_handle = vsi_handle;
8143                         status = ice_rem_adv_rule(hw, list_itr->lkups,
8144                                                   list_itr->lkups_cnt, &rinfo);
8145                         if (status)
8146                                 return status;
8147                         map_info = NULL;
8148                 }
8149         }
8150         return ICE_SUCCESS;
8151 }
8152
8153 /**
8154  * ice_replay_fltr - Replay all the filters stored by a specific list head
8155  * @hw: pointer to the hardware structure
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
8158  */
static enum ice_status
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u8 lport = hw->port_info->lport;
	struct LIST_HEAD_TYPE l_head;

	/* Nothing to replay for an empty list */
	if (LIST_EMPTY(list_head))
		return status;

	recp_list = &hw->switch_info->recp_list[recp_id];
	/* Move entries from the given list_head to a temporary l_head so that
	 * they can be replayed. Otherwise when trying to re-add the same
	 * filter, the function will return already exists
	 */
	LIST_REPLACE_INIT(list_head, &l_head);

	/* Mark the given list_head empty by reinitializing it so filters
	 * could be added again by *handler
	 */
	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;
		u16 vsi_handle;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rules can be re-added in one shot */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
			status = ice_add_rule_internal(hw, recp_list, lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}

		/* Add a filter per VSI separately */
		/* NOTE(review): assumes itr->vsi_list_info is non-NULL
		 * whenever this branch is taken (vsi_count >= 2, or a VLAN
		 * rule) — confirm this invariant against the add paths.
		 */
		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
				     ICE_MAX_VSI) {
			if (!ice_is_vsi_valid(hw, vsi_handle))
				break;

			/* Clear the bit before re-adding so the replayed add
			 * builds fresh VSI-list state for this handle.
			 */
			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
			f_entry.fltr_info.vsi_handle = vsi_handle;
			f_entry.fltr_info.fwd_id.hw_vsi_id =
				ice_get_hw_vsi_num(hw, vsi_handle);
			/* Replay each subscriber as a plain fwd-to-VSI; the
			 * add path re-aggregates them into a VSI list.
			 */
			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
			if (recp_id == ICE_SW_LKUP_VLAN)
				status = ice_add_vlan_internal(hw, recp_list,
							       &f_entry);
			else
				status = ice_add_rule_internal(hw, recp_list,
							       lport,
							       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
		}
	}
end:
	/* Clear the filter management list */
	ice_rem_sw_rule_info(hw, &l_head);
	return status;
}
8222
8223 /**
8224  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8225  * @hw: pointer to the hardware structure
8226  *
8227  * NOTE: This function does not clean up partially added filters on error.
8228  * It is up to caller of the function to issue a reset or fail early.
8229  */
8230 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8231 {
8232         struct ice_switch_info *sw = hw->switch_info;
8233         enum ice_status status = ICE_SUCCESS;
8234         u8 i;
8235
8236         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8237                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8238
8239                 status = ice_replay_fltr(hw, i, head);
8240                 if (status != ICE_SUCCESS)
8241                         return status;
8242         }
8243         return status;
8244 }
8245
/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @pi: pointer to port information structure
 * @sw: pointer to switch info struct for which function replays filters
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
		    struct LIST_HEAD_TYPE *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *recp_list;
	u16 hw_vsi_id;

	/* Nothing to replay for this recipe */
	if (LIST_EMPTY(list_head))
		return status;
	recp_list = &sw->recp_list[recp_id];
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
			    list_entry) {
		struct ice_fltr_list_entry f_entry;

		/* Work on a local copy so the bookkeeping entry itself
		 * is never modified by the add path.
		 */
		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI, non-VLAN rule owned by this VSI: re-add it
		 * directly without touching any VSI list bookkeeping.
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
			if (status != ICE_SUCCESS)
				goto end;
			continue;
		}
		/* Skip entries whose VSI list does not include this VSI */
		if (!itr->vsi_list_info ||
		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
			continue;
		/* Clearing it so that the logic can add it back */
		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		/* VLAN rules have a dedicated add path */
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_list,
						       pi->lport,
						       &f_entry);
		if (status != ICE_SUCCESS)
			goto end;
	}
end:
	return status;
}
8312
8313 /**
8314  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8315  * @hw: pointer to the hardware structure
8316  * @vsi_handle: driver VSI handle
8317  * @list_head: list for which filters need to be replayed
8318  *
8319  * Replay the advanced rule for the given VSI.
8320  */
8321 static enum ice_status
8322 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8323                         struct LIST_HEAD_TYPE *list_head)
8324 {
8325         struct ice_rule_query_data added_entry = { 0 };
8326         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8327         enum ice_status status = ICE_SUCCESS;
8328
8329         if (LIST_EMPTY(list_head))
8330                 return status;
8331         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8332                             list_entry) {
8333                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8334                 u16 lk_cnt = adv_fltr->lkups_cnt;
8335
8336                 if (vsi_handle != rinfo->sw_act.vsi_handle)
8337                         continue;
8338                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8339                                           &added_entry);
8340                 if (status)
8341                         break;
8342         }
8343         return status;
8344 }
8345
8346 /**
8347  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8348  * @hw: pointer to the hardware structure
8349  * @pi: pointer to port information structure
8350  * @vsi_handle: driver VSI handle
8351  *
8352  * Replays filters for requested VSI via vsi_handle.
8353  */
8354 enum ice_status
8355 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8356                         u16 vsi_handle)
8357 {
8358         struct ice_switch_info *sw = hw->switch_info;
8359         enum ice_status status;
8360         u8 i;
8361
8362         /* Update the recipes that were created */
8363         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8364                 struct LIST_HEAD_TYPE *head;
8365
8366                 head = &sw->recp_list[i].filt_replay_rules;
8367                 if (!sw->recp_list[i].adv_rule)
8368                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8369                                                      head);
8370                 else
8371                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8372                 if (status != ICE_SUCCESS)
8373                         return status;
8374         }
8375
8376         return ICE_SUCCESS;
8377 }
8378
8379 /**
8380  * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8381  * @hw: pointer to the HW struct
8382  * @sw: pointer to switch info struct for which function removes filters
8383  *
8384  * Deletes the filter replay rules for given switch
8385  */
8386 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8387 {
8388         u8 i;
8389
8390         if (!sw)
8391                 return;
8392
8393         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8394                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8395                         struct LIST_HEAD_TYPE *l_head;
8396
8397                         l_head = &sw->recp_list[i].filt_replay_rules;
8398                         if (!sw->recp_list[i].adv_rule)
8399                                 ice_rem_sw_rule_info(hw, l_head);
8400                         else
8401                                 ice_rem_adv_rule_info(hw, l_head);
8402                 }
8403         }
8404 }
8405
8406 /**
8407  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8408  * @hw: pointer to the HW struct
8409  *
8410  * Deletes the filter replay rules.
8411  */
8412 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8413 {
8414         ice_rm_sw_replay_rule_info(hw, hw->switch_info);
8415 }