net/ice/base: change dummy packets with VLAN
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
/* Byte offsets into a plain Ethernet header and well-known protocol
 * identifiers referenced when programming switch filter rules.
 * NOTE(review): uses of these constants are outside this chunk — verify
 * against the rest of ice_switch.c.
 */
#define ICE_ETH_DA_OFFSET               0
#define ICE_ETH_ETHTYPE_OFFSET          12
#define ICE_ETH_VLAN_TCI_OFFSET         14
#define ICE_MAX_VLAN_ID                 0xFFF
#define ICE_IPV6_ETHER_ID               0x86DD
#define ICE_IPV4_NVGRE_PROTO_ID         0x002F
#define ICE_PPP_IPV6_PROTO_ID           0x0057
#define ICE_TCP_PROTO_ID                0x06
#define ICE_GTPU_PROFILE                24
#define ICE_ETH_P_8021Q                 0x8100
#define ICE_MPLS_ETHER_ID               0x8847
20
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Notes on the hardcoded values:
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter the first two bytes define the ether type
 *      (0x8100) and the remaining two bytes are a placeholder for
 *      programming a given VLAN ID.
 *      In case of an Ether type filter it is treated as a header without
 *      a VLAN tag, and bytes 12 and 13 are used to program a given Ether
 *      type instead.
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0};
39
/* Maps a protocol header to its byte offset within a dummy packet; the
 * offset tables below each end with an ICE_PROTOCOL_LAST sentinel entry.
 */
struct ice_dummy_pkt_offsets {
        enum ice_protocol_type type;
        u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
44
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP
 * dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_TCP_IL,           76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00,
        0x00, 0x00, 0x00, 0x00
};
89
/* offset info for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP
 * dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_NVGRE,            34 },
        { ICE_MAC_IL,           42 },
        { ICE_IPV4_IL,          56 },
        { ICE_UDP_ILOS,         76 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + inner IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x2F, 0x00, 0x00, /* protocol 0x2F = GRE */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
        0x00, 0x08, 0x00, 0x00,
};
131
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + inner IPv4 + TCP dummy packet; the three tunnel headers
 * share offset 42
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_TCP_IL,           84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + inner IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
        0x00, 0x46, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x40, 0x06, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x02, 0x20, 0x00,
        0x00, 0x00, 0x00, 0x00
};
182
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + inner IPv4 + UDP dummy packet; the three tunnel headers
 * share offset 42
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_VXLAN,            42 },
        { ICE_GENEVE,           42 },
        { ICE_VXLAN_GPE,        42 },
        { ICE_MAC_IL,           50 },
        { ICE_IPV4_IL,          64 },
        { ICE_UDP_ILOS,         84 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + inner IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
        0x00, 0x3a, 0x00, 0x00,

        0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
        0x00, 0x08, 0x00, 0x00,
};
230
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_ILOS,         34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
259
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_UDP_ILOS,         38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

        0x08, 0x00,             /* ICE_ETYPE_OL 16 */

        0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00, /* protocol 0x11 = UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
291
/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_TCP_IL,           34 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x08, 0x00,             /* ICE_ETYPE_OL 12 */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
323
/* offset info for MAC + MPLS dummy packet */
static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + MPLS */
static const u8 dummy_mpls_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x88, 0x47,             /* ICE_ETYPE_OL 12 (MPLS unicast) */
        0x00, 0x00, 0x01, 0x00, /* single MPLS label with bottom-of-stack set */

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
342
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV4_OFOS,        18 },
        { ICE_TCP_IL,           38 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

        0x08, 0x00,             /* ICE_ETYPE_OL 16 */

        0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
        0x00, 0x01, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00, /* protocol 0x06 = TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,     /* 2 bytes for 4 byte alignment */
};
377
/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_TCP_IL,           54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
412
/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_TCP_IL,           58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

        0x86, 0xDD,             /* ICE_ETYPE_OL 16 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
453
/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_ETYPE_OL,         12 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_ILOS,         54 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x86, 0xDD,             /* ICE_ETYPE_OL 12 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
        0x00, 0x10, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
        0x00, 0x10, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
490
/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_VLAN_OFOS,        12 },
        { ICE_ETYPE_OL,         16 },
        { ICE_IPV6_OFOS,        18 },
        { ICE_UDP_ILOS,         58 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

        0x86, 0xDD,             /* ICE_ETYPE_OL 16 */

        0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
        0x00, 0x08, 0x11, 0x00, /* Next header UDP */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
528
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_TCP_IL,           82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP-U + Inner IPv4 + Inner TCP */
static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x58, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 = GTP-U */
        0x00, 0x44, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x28, /* IP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
576
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV4_IL,          62 },
        { ICE_UDP_ILOS,         82 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP-U + Inner IPv4 + Inner UDP */
static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x4c, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 = GTP-U */
        0x00, 0x38, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* IP 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 82 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
621
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP
 * (previous comment said outer IPv6/inner IPv4, which contradicts the
 * offsets below: ICE_IPV4_OFOS outer, ICE_IPV6_IL inner)
 */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_TCP_IL,           102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP-U + Inner IPv6 + Inner TCP */
static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x6c, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 = GTP-U */
        0x00, 0x58, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
        0x00, 0x14, 0x06, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 102 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
674
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV4_OFOS,        14 },
        { ICE_UDP_OF,           34 },
        { ICE_GTP,              42 },
        { ICE_IPV6_IL,          62 },
        { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP-U + Inner IPv6 + Inner UDP */
static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x08, 0x00,

        0x45, 0x00, 0x00, 0x60, /* IP 14 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 = GTP-U */
        0x00, 0x4c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
        0x00, 0x08, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 102 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
723
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_TCP_IL,           102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP-U + Inner IPv4 + Inner TCP */
static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x44, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 = GTP-U */
        0x00, 0x44, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x28, /* IP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x06, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 102 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
775
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV4_IL,          82 },
        { ICE_UDP_ILOS,         102 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP-U + Inner IPv4 + Inner UDP */
static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x38, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 = GTP-U */
        0x00, 0x38, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x45, 0x00, 0x00, 0x1c, /* IP 82 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x11, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 102 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
824
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_TCP_IL,           122 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP-U + Inner IPv6 + Inner TCP */
static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x58, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 = GTP-U */
        0x00, 0x58, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
        0x00, 0x14, 0x06, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* TCP 122 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x50, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
881
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
        { ICE_MAC_OFOS,         0 },
        { ICE_IPV6_OFOS,        14 },
        { ICE_UDP_OF,           54 },
        { ICE_GTP,              62 },
        { ICE_IPV6_IL,          82 },
        { ICE_UDP_ILOS,         122 },
        { ICE_PROTOCOL_LAST,    0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP-U + Inner IPv6 + Inner UDP */
static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
        0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x86, 0xdd,

        0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
        0x00, 0x4c, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 = GTP-U */
        0x00, 0x4c, 0x00, 0x00,

        0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x85,

        0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
        0x00, 0x00, 0x00, 0x00,

        0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
        0x00, 0x08, 0x11, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x00, /* UDP 122 */
        0x00, 0x08, 0x00, 0x00,

        0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
935
/* Header offsets within the IPv4/UDP/GTP-U(EH)/IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/UDP/GTP-U with PDU session extension header,
 * carrying an inner IPv4 header and no further payload
 */
static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14, total len 68 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62, total len 20 (header only) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
974
/* Header offsets within the IPv4/UDP/GTP-U(EH)/IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/UDP/GTP-U with PDU session extension header,
 * carrying an inner IPv6 header and no further payload
 */
static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14, total len 88 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* TTL 64, protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header (59) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1020
/* Header offsets within the IPv6/UDP/GTP-U(EH)/IPv4 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/UDP/GTP-U with PDU session extension header,
 * carrying an inner IPv4 header and no further payload
 */
static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x58, 0x11, 0x00, /* payload len 88, next header UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82, total len 20 (header only) */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1066
/* Header offsets within the IPv6/UDP/GTP-U(EH)/IPv6 dummy packet below */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/UDP/GTP-U with PDU session extension header,
 * carrying an inner IPv6 header and no further payload
 */
static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* payload len 108, next header UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header (59) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1117
/* Header offsets within the IPv4/UDP/GTP-U(EH) dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/UDP/GTP-U with PDU session extension header,
 * no inner payload
 */
static const u8 dummy_udp_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14, total len 48 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34, dst port 2152 (GTP-U) */
	0x00, 0x1c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

};
1149
/* Header offsets for GTP-U-without-payload matching over an outer IPv4 */
static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for GTP-U-without-payload matching over an outer IPv6 */
static const
struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/UDP/GTP-U without extension header or payload */
static const u8 dummy_ipv6_gtp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28,  /* ICE_GTP 62, flags 0x30: no ext. header */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1192
/* Header offsets for a bare PPPoE session packet (VLAN-tagged) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for a PPPoE session packet carrying IPv4 (VLAN-tagged) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying an IPv4 header only */
static const u8 dummy_pppoe_ipv4_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,		/* PPPoE payload length 22 */

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26, total len 20 (header only) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1232
/* Header offsets for a PPPoE session packet carrying IPv4/TCP (VLAN-tagged) */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_TCP_IL,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying IPv4/TCP */
static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,		/* PPPoE payload length 22 */

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26, total len 40 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00, /* protocol TCP (6) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1272
/* Header offsets for a PPPoE session packet carrying IPv4/UDP (VLAN-tagged) */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV4_OFOS,	26 },
	{ ICE_UDP_ILOS,		46 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying IPv4/UDP */
static const u8 dummy_pppoe_ipv4_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x16,		/* PPPoE payload length 22 */

	0x00, 0x21,		/* PPP Link Layer 24, protocol IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26, total len 28 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
	0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1309
/* Header offsets for a PPPoE session packet carrying IPv6 (VLAN-tagged) */
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying an IPv6 header only */
static const u8 dummy_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,		/* PPPoE payload length 42 */

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header (59) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1346
/* Header offsets for a PPPoE session packet carrying IPv6/TCP (VLAN-tagged) */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_TCP_IL,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying IPv6/TCP */
static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,		/* PPPoE payload length 42 */

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00, /* data offset 5 (20-byte TCP header) */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1391
/* Header offsets for a PPPoE session packet carrying IPv6/UDP (VLAN-tagged) */
static const
struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_PPPOE,		18 },
	{ ICE_IPV6_OFOS,	26 },
	{ ICE_UDP_ILOS,		66 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/VLAN/PPPoE session carrying IPv6/UDP */
static const u8 dummy_pppoe_ipv6_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12, TPID 0x8100 */

	0x88, 0x64,		/* ICE_ETYPE_OL 16, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
	0x00, 0x2a,		/* PPPoE payload length 42 */

	0x00, 0x57,		/* PPP Link Layer 24, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
	0x00, 0x08, 0x00, 0x00, /* UDP length 8 (header only) */

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1433
/* Header offsets within the IPv4/ESP dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_ESP,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/ESP */
static const u8 dummy_ipv4_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14, total len 28 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x32, 0x00, 0x00, /* protocol ESP (50) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34: SPI + sequence number */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1457
/* Header offsets within the IPv6/ESP dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_ESP,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/ESP */
static const u8 dummy_ipv6_esp_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54: SPI + sequence number */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1486
/* Header offsets within the IPv4/AH dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_AH,			34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/AH (authentication header) */
static const u8 dummy_ipv4_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14, total len 32 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x33, 0x00, 0x00, /* protocol AH (51) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1511
/* Header offsets within the IPv6/AH dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_AH,			54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/AH (authentication header) */
static const u8 dummy_ipv6_ah_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1541
/* Header offsets within the IPv4 NAT-T (UDP-encapsulated ESP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_NAT_T,		42 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/UDP(4500)/ESP, i.e. IPsec NAT traversal */
static const u8 dummy_ipv4_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14, total len 36 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34, UDP dst port 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ESP SPI + sequence number */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1569
/* Header offsets within the IPv6 NAT-T (UDP-encapsulated ESP) dummy packet */
static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_NAT_T,		62 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/UDP(4500)/ESP, i.e. IPsec NAT traversal */
static const u8 dummy_ipv6_nat_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54, UDP dst port 4500 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ESP SPI + sequence number */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */

};
1603
/* Header offsets within the IPv4/L2TPv3 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_L2TPV3,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv4/L2TPv3 over IP */
static const u8 dummy_ipv4_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* ethertype IPv4 */

	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14, total len 32 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x73, 0x00, 0x00, /* protocol L2TPv3 (115) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1628
/* Header offsets within the IPv6/L2TPv3 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_L2TPV3,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/IPv6/L2TPv3 over IP */
static const u8 dummy_ipv6_l2tpv3_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xDD,		/* ethertype IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
	0x00, 0x0c, 0x73, 0x40, /* next header L2TPv3 (115) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1658
/* Header offsets within the double-VLAN (QinQ) IPv4 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV4_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet with outer (0x9100) and inner (0x8100) VLAN tags,
 * carrying IPv4/UDP
 */
static const u8 dummy_qinq_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x08, 0x00,		/* ICE_ETYPE_OL 20 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22, total len 28 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00, /* protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1688
/* Header offsets within the double-VLAN (QinQ) IPv6 dummy packet below */
static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_IPV6_OFOS,	22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet with outer (0x9100) and inner (0x8100) VLAN tags,
 * carrying IPv6/UDP
 */
static const u8 dummy_qinq_ipv6_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x86, 0xDD,		/* ICE_ETYPE_OL 20 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
	0x00, 0x10, 0x00, 0x00, /* UDP length 16: header + 8-byte payload */

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1726
/* Header offsets for a bare QinQ PPPoE session packet */
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Header offsets for a QinQ PPPoE session packet carrying IPv4 */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV4_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/QinQ/PPPoE session carrying an IPv4 header only */
static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x88, 0x64,		/* ICE_ETYPE_OL 20, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x16,		/* PPPoE payload length 22 */

	0x00, 0x21,		/* PPP Link Layer 28, protocol IPv4 */

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30, total len 20 (header only) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
1769
/* Header offsets for a QinQ PPPoE session packet carrying IPv6 */
static const
struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_EX,		12 },
	{ ICE_VLAN_IN,		16 },
	{ ICE_ETYPE_OL,		20 },
	{ ICE_PPPOE,		22 },
	{ ICE_IPV6_OFOS,	30 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet: Ethernet/QinQ/PPPoE session carrying an IPv6 header only */
static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
	0x88, 0x64,		/* ICE_ETYPE_OL 20, PPPoE session */

	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
	0x00, 0x2a,		/* PPPoE payload length 42 */

	0x00, 0x57,		/* PPP Link Layer 28, protocol IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
	0x00, 0x00, 0x3b, 0x00, /* next header: no next header (59) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
};
1808
1809 /* this is a recipe to profile association bitmap */
/* recipe to profile association bitmap: for each recipe, the set of
 * hardware profiles it is associated with
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* profile to recipe association bitmap: for each profile, the set of
 * recipes associated with it (inverse of the map above)
 */
static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);

/* forward declaration; defined later in this file */
static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1818
1819 /**
1820  * ice_collect_result_idx - copy result index values
1821  * @buf: buffer that contains the result index
1822  * @recp: the recipe struct to copy data into
1823  */
1824 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1825                                    struct ice_sw_recipe *recp)
1826 {
1827         if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1828                 ice_set_bit(buf->content.result_indx &
1829                             ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1830 }
1831
/* Map of GTPU hardware profile IDs to switch tunnel types; scanned linearly
 * by ice_get_tun_type_for_recipe(). Sized by ICE_GTPU_PROFILE (24 entries).
 * NOTE(review): the table is never written here - looks like it could be
 * declared const; confirm no other user modifies it.
 */
static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV4},
	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV6},
	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV4},
	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV6},
	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
};
1858
/**
 * ice_get_tun_type_for_recipe - get tunnel type for the recipe
 * @rid: recipe ID that we are populating
 * @vlan: flag of vlan protocol - true if the recipe matches the VLAN/tunnel
 *	  metadata flag word (selects the *_QINQ tunnel-type variants)
 *
 * Derive the tunnel type of recipe @rid from the set of hardware profiles
 * associated with it in the recipe_to_profile bitmap.
 */
static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
{
	/* profile IDs grouped by the packet type they parse;
	 * NOTE(review): these hard-coded IDs mirror the package profile
	 * layout - confirm against the DDP package if profiles change
	 */
	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
	enum ice_sw_tunnel_type tun_type;
	u16 i, j, k, profile_num = 0;
	bool non_tun_valid = false;
	bool pppoe_valid = false;
	bool vxlan_valid = false;
	bool gre_valid = false;
	bool gtp_valid = false;
	bool flag_valid = false;

	/* classify every profile associated with this recipe */
	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
		if (!ice_is_bit_set(recipe_to_profile[rid], j))
			continue;
		else
			profile_num++;

		for (i = 0; i < 12; i++) {
			if (gre_profile[i] == j)
				gre_valid = true;
		}

		for (i = 0; i < 12; i++) {
			if (vxlan_profile[i] == j)
				vxlan_valid = true;
		}

		for (i = 0; i < 7; i++) {
			if (pppoe_profile[i] == j)
				pppoe_valid = true;
		}

		for (i = 0; i < 6; i++) {
			if (non_tun_profile[i] == j)
				non_tun_valid = true;
		}

		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
			gtp_valid = true;

		if ((j >= ICE_PROFID_IPV4_ESP &&
		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
		     j <= ICE_PROFID_IPV6_GTPU_TEID))
			flag_valid = true;
	}

	/* pick a coarse tunnel type from the profile classes seen above */
	if (!non_tun_valid && vxlan_valid)
		tun_type = ICE_SW_TUN_VXLAN;
	else if (!non_tun_valid && gre_valid)
		tun_type = ICE_SW_TUN_NVGRE;
	else if (!non_tun_valid && pppoe_valid)
		tun_type = ICE_SW_TUN_PPPOE;
	else if (!non_tun_valid && gtp_valid)
		tun_type = ICE_SW_TUN_GTP;
	else if (non_tun_valid &&
		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
		tun_type = ICE_SW_TUN_AND_NON_TUN;
	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
		 !pppoe_valid)
		tun_type = ICE_NON_TUN;
	else
		tun_type = ICE_NON_TUN;

	/* refine PPPoE to IPv4/IPv6 when only one address family is mapped */
	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
		i = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV4_OTHER);
		j = ice_is_bit_set(recipe_to_profile[rid],
				   ICE_PROFID_PPPOE_IPV6_OTHER);
		if (i && !j)
			tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else if (!i && j)
			tun_type = ICE_SW_TUN_PPPOE_IPV6;
	}

	/* refine generic GTP via the profile-to-tunnel-type table */
	if (tun_type == ICE_SW_TUN_GTP) {
		for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
			if (ice_is_bit_set(recipe_to_profile[rid],
					   ice_prof_type_tbl[k].prof_id)) {
				tun_type = ice_prof_type_tbl[k].type;
				break;
			}
	}

	/* exactly one profile: map it directly to a specific tunnel type */
	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
				switch (j) {
				case ICE_PROFID_IPV4_TCP:
					tun_type = ICE_SW_IPV4_TCP;
					break;
				case ICE_PROFID_IPV4_UDP:
					tun_type = ICE_SW_IPV4_UDP;
					break;
				case ICE_PROFID_IPV6_TCP:
					tun_type = ICE_SW_IPV6_TCP;
					break;
				case ICE_PROFID_IPV6_UDP:
					tun_type = ICE_SW_IPV6_UDP;
					break;
				case ICE_PROFID_PPPOE_PAY:
					tun_type = ICE_SW_TUN_PPPOE_PAY;
					break;
				case ICE_PROFID_PPPOE_IPV4_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
					break;
				case ICE_PROFID_PPPOE_IPV4_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
					break;
				case ICE_PROFID_PPPOE_IPV4_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV4;
					break;
				case ICE_PROFID_PPPOE_IPV6_TCP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
					break;
				case ICE_PROFID_PPPOE_IPV6_UDP:
					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
					break;
				case ICE_PROFID_PPPOE_IPV6_OTHER:
					tun_type = ICE_SW_TUN_PPPOE_IPV6;
					break;
				case ICE_PROFID_IPV4_ESP:
					tun_type = ICE_SW_TUN_IPV4_ESP;
					break;
				case ICE_PROFID_IPV6_ESP:
					tun_type = ICE_SW_TUN_IPV6_ESP;
					break;
				case ICE_PROFID_IPV4_AH:
					tun_type = ICE_SW_TUN_IPV4_AH;
					break;
				case ICE_PROFID_IPV6_AH:
					tun_type = ICE_SW_TUN_IPV6_AH;
					break;
				case ICE_PROFID_IPV4_NAT_T:
					tun_type = ICE_SW_TUN_IPV4_NAT_T;
					break;
				case ICE_PROFID_IPV6_NAT_T:
					tun_type = ICE_SW_TUN_IPV6_NAT_T;
					break;
				case ICE_PROFID_IPV4_PFCP_NODE:
					tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
					break;
				case ICE_PROFID_IPV6_PFCP_NODE:
					tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
					break;
				case ICE_PROFID_IPV4_PFCP_SESSION:
					tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
					break;
				case ICE_PROFID_IPV6_PFCP_SESSION:
					tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
					break;
				case ICE_PROFID_MAC_IPV4_L2TPV3:
					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
					break;
				case ICE_PROFID_MAC_IPV6_L2TPV3:
					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
					break;
				case ICE_PROFID_IPV4_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
					break;
				case ICE_PROFID_IPV6_GTPU_TEID:
					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
					break;
				default:
					break;
				}

				return tun_type;
			}
		}
	}

	/* promote to the QinQ variant when the VLAN flag is matched */
	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
		tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan && tun_type == ICE_NON_TUN)
		tun_type = ICE_NON_TUN_QINQ;

	return tun_type;
}
2059
2060 /**
2061  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2062  * @hw: pointer to hardware structure
2063  * @recps: struct that we need to populate
2064  * @rid: recipe ID that we are populating
2065  * @refresh_required: true if we should get recipe to profile mapping from FW
2066  *
2067  * This function is used to populate all the necessary entries into our
2068  * bookkeeping so that we have a current list of all the recipes that are
2069  * programmed in the firmware.
2070  */
2071 static enum ice_status
2072 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2073                     bool *refresh_required)
2074 {
2075         ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2076         struct ice_aqc_recipe_data_elem *tmp;
2077         u16 num_recps = ICE_MAX_NUM_RECIPES;
2078         struct ice_prot_lkup_ext *lkup_exts;
2079         enum ice_status status;
2080         u8 fv_word_idx = 0;
2081         bool vlan = false;
2082         u16 sub_recps;
2083
2084         ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2085
2086         /* we need a buffer big enough to accommodate all the recipes */
2087         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2088                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2089         if (!tmp)
2090                 return ICE_ERR_NO_MEMORY;
2091
2092         tmp[0].recipe_indx = rid;
2093         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2094         /* non-zero status meaning recipe doesn't exist */
2095         if (status)
2096                 goto err_unroll;
2097
2098         /* Get recipe to profile map so that we can get the fv from lkups that
2099          * we read for a recipe from FW. Since we want to minimize the number of
2100          * times we make this FW call, just make one call and cache the copy
2101          * until a new recipe is added. This operation is only required the
2102          * first time to get the changes from FW. Then to search existing
2103          * entries we don't need to update the cache again until another recipe
2104          * gets added.
2105          */
2106         if (*refresh_required) {
2107                 ice_get_recp_to_prof_map(hw);
2108                 *refresh_required = false;
2109         }
2110
2111         /* Start populating all the entries for recps[rid] based on lkups from
2112          * firmware. Note that we are only creating the root recipe in our
2113          * database.
2114          */
2115         lkup_exts = &recps[rid].lkup_exts;
2116
2117         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2118                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2119                 struct ice_recp_grp_entry *rg_entry;
2120                 u8 i, prof, idx, prot = 0;
2121                 bool is_root;
2122                 u16 off = 0;
2123
2124                 rg_entry = (struct ice_recp_grp_entry *)
2125                         ice_malloc(hw, sizeof(*rg_entry));
2126                 if (!rg_entry) {
2127                         status = ICE_ERR_NO_MEMORY;
2128                         goto err_unroll;
2129                 }
2130
2131                 idx = root_bufs.recipe_indx;
2132                 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2133
2134                 /* Mark all result indices in this chain */
2135                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2136                         ice_set_bit(root_bufs.content.result_indx &
2137                                     ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2138
2139                 /* get the first profile that is associated with rid */
2140                 prof = ice_find_first_bit(recipe_to_profile[idx],
2141                                           ICE_MAX_NUM_PROFILES);
2142                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2143                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2144
2145                         rg_entry->fv_idx[i] = lkup_indx;
2146                         rg_entry->fv_mask[i] =
2147                                 LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2148
2149                         /* If the recipe is a chained recipe then all its
2150                          * child recipe's result will have a result index.
2151                          * To fill fv_words we should not use those result
2152                          * index, we only need the protocol ids and offsets.
2153                          * We will skip all the fv_idx which stores result
2154                          * index in them. We also need to skip any fv_idx which
2155                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2156                          * valid offset value.
2157                          */
2158                         if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2159                                            rg_entry->fv_idx[i]) ||
2160                             rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2161                             rg_entry->fv_idx[i] == 0)
2162                                 continue;
2163
2164                         ice_find_prot_off(hw, ICE_BLK_SW, prof,
2165                                           rg_entry->fv_idx[i], &prot, &off);
2166                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2167                         lkup_exts->fv_words[fv_word_idx].off = off;
2168                         lkup_exts->field_mask[fv_word_idx] =
2169                                 rg_entry->fv_mask[i];
2170                         if (prot == ICE_META_DATA_ID_HW &&
2171                             off == ICE_TUN_FLAG_MDID_OFF)
2172                                 vlan = true;
2173                         fv_word_idx++;
2174                 }
2175                 /* populate rg_list with the data from the child entry of this
2176                  * recipe
2177                  */
2178                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2179
2180                 /* Propagate some data to the recipe database */
2181                 recps[idx].is_root = !!is_root;
2182                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2183                 ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2184                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2185                         recps[idx].chain_idx = root_bufs.content.result_indx &
2186                                 ~ICE_AQ_RECIPE_RESULT_EN;
2187                         ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2188                 } else {
2189                         recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2190                 }
2191
2192                 if (!is_root)
2193                         continue;
2194
2195                 /* Only do the following for root recipes entries */
2196                 ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2197                            sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2198                 recps[idx].root_rid = root_bufs.content.rid &
2199                         ~ICE_AQ_RECIPE_ID_IS_ROOT;
2200                 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2201         }
2202
2203         /* Complete initialization of the root recipe entry */
2204         lkup_exts->n_val_words = fv_word_idx;
2205         recps[rid].big_recp = (num_recps > 1);
2206         recps[rid].n_grp_count = (u8)num_recps;
2207         recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2208         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2209                 ice_memdup(hw, tmp, recps[rid].n_grp_count *
2210                            sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2211         if (!recps[rid].root_buf)
2212                 goto err_unroll;
2213
2214         /* Copy result indexes */
2215         ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2216         recps[rid].recp_created = true;
2217
2218 err_unroll:
2219         ice_free(hw, tmp);
2220         return status;
2221 }
2222
2223 /**
2224  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2225  * @hw: pointer to hardware structure
2226  *
2227  * This function is used to populate recipe_to_profile matrix where index to
2228  * this array is the recipe ID and the element is the mapping of which profiles
2229  * is this recipe mapped to.
2230  */
2231 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2232 {
2233         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2234         u16 i;
2235
2236         for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2237                 u16 j;
2238
2239                 ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2240                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2241                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2242                         continue;
2243                 ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2244                               ICE_MAX_NUM_RECIPES);
2245                 ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2246                         ice_set_bit(i, recipe_to_profile[j]);
2247         }
2248 }
2249
2250 /**
2251  * ice_init_def_sw_recp - initialize the recipe book keeping tables
2252  * @hw: pointer to the HW struct
2253  * @recp_list: pointer to sw recipe list
2254  *
2255  * Allocate memory for the entire recipe table and initialize the structures/
2256  * entries corresponding to basic recipes.
2257  */
2258 enum ice_status
2259 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2260 {
2261         struct ice_sw_recipe *recps;
2262         u8 i;
2263
2264         recps = (struct ice_sw_recipe *)
2265                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2266         if (!recps)
2267                 return ICE_ERR_NO_MEMORY;
2268
2269         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2270                 recps[i].root_rid = i;
2271                 INIT_LIST_HEAD(&recps[i].filt_rules);
2272                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2273                 INIT_LIST_HEAD(&recps[i].rg_list);
2274                 ice_init_lock(&recps[i].filt_rule_lock);
2275         }
2276
2277         *recp_list = recps;
2278
2279         return ICE_SUCCESS;
2280 }
2281
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* report back the continuation token and element count */
		*req_desc = LE16_TO_CPU(cmd->element);
		*num_elems = LE16_TO_CPU(cmd->num_elems);
	}

	return status;
}
2328
2329 /**
2330  * ice_alloc_rss_global_lut - allocate a RSS global LUT
2331  * @hw: pointer to the HW struct
2332  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2333  * @global_lut_id: output parameter for the RSS global LUT's ID
2334  */
2335 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2336 {
2337         struct ice_aqc_alloc_free_res_elem *sw_buf;
2338         enum ice_status status;
2339         u16 buf_len;
2340
2341         buf_len = ice_struct_size(sw_buf, elem, 1);
2342         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2343         if (!sw_buf)
2344                 return ICE_ERR_NO_MEMORY;
2345
2346         sw_buf->num_elems = CPU_TO_LE16(1);
2347         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2348                                        (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2349                                        ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2350
2351         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2352         if (status) {
2353                 ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2354                           shared_res ? "shared" : "dedicated", status);
2355                 goto ice_alloc_global_lut_exit;
2356         }
2357
2358         *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2359
2360 ice_alloc_global_lut_exit:
2361         ice_free(hw, sw_buf);
2362         return status;
2363 }
2364
2365 /**
2366  * ice_free_rss_global_lut - free a RSS global LUT
2367  * @hw: pointer to the HW struct
2368  * @global_lut_id: ID of the RSS global LUT to free
2369  */
2370 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2371 {
2372         struct ice_aqc_alloc_free_res_elem *sw_buf;
2373         u16 buf_len, num_elems = 1;
2374         enum ice_status status;
2375
2376         buf_len = ice_struct_size(sw_buf, elem, num_elems);
2377         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2378         if (!sw_buf)
2379                 return ICE_ERR_NO_MEMORY;
2380
2381         sw_buf->num_elems = CPU_TO_LE16(num_elems);
2382         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2383         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2384
2385         status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2386         if (status)
2387                 ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2388                           global_lut_id, status);
2389
2390         ice_free(hw, sw_buf);
2391         return status;
2392 }
2393
2394 /**
2395  * ice_alloc_sw - allocate resources specific to switch
2396  * @hw: pointer to the HW struct
2397  * @ena_stats: true to turn on VEB stats
2398  * @shared_res: true for shared resource, false for dedicated resource
2399  * @sw_id: switch ID returned
2400  * @counter_id: VEB counter ID returned
2401  *
2402  * allocates switch resources (SWID and VEB counter) (0x0208)
2403  */
2404 enum ice_status
2405 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2406              u16 *counter_id)
2407 {
2408         struct ice_aqc_alloc_free_res_elem *sw_buf;
2409         struct ice_aqc_res_elem *sw_ele;
2410         enum ice_status status;
2411         u16 buf_len;
2412
2413         buf_len = ice_struct_size(sw_buf, elem, 1);
2414         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2415         if (!sw_buf)
2416                 return ICE_ERR_NO_MEMORY;
2417
2418         /* Prepare buffer for switch ID.
2419          * The number of resource entries in buffer is passed as 1 since only a
2420          * single switch/VEB instance is allocated, and hence a single sw_id
2421          * is requested.
2422          */
2423         sw_buf->num_elems = CPU_TO_LE16(1);
2424         sw_buf->res_type =
2425                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2426                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2427                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2428
2429         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2430                                        ice_aqc_opc_alloc_res, NULL);
2431
2432         if (status)
2433                 goto ice_alloc_sw_exit;
2434
2435         sw_ele = &sw_buf->elem[0];
2436         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2437
2438         if (ena_stats) {
2439                 /* Prepare buffer for VEB Counter */
2440                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2441                 struct ice_aqc_alloc_free_res_elem *counter_buf;
2442                 struct ice_aqc_res_elem *counter_ele;
2443
2444                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2445                                 ice_malloc(hw, buf_len);
2446                 if (!counter_buf) {
2447                         status = ICE_ERR_NO_MEMORY;
2448                         goto ice_alloc_sw_exit;
2449                 }
2450
2451                 /* The number of resource entries in buffer is passed as 1 since
2452                  * only a single switch/VEB instance is allocated, and hence a
2453                  * single VEB counter is requested.
2454                  */
2455                 counter_buf->num_elems = CPU_TO_LE16(1);
2456                 counter_buf->res_type =
2457                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2458                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2459                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2460                                                opc, NULL);
2461
2462                 if (status) {
2463                         ice_free(hw, counter_buf);
2464                         goto ice_alloc_sw_exit;
2465                 }
2466                 counter_ele = &counter_buf->elem[0];
2467                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2468                 ice_free(hw, counter_buf);
2469         }
2470
2471 ice_alloc_sw_exit:
2472         ice_free(hw, sw_buf);
2473         return status;
2474 }
2475
2476 /**
2477  * ice_free_sw - free resources specific to switch
2478  * @hw: pointer to the HW struct
2479  * @sw_id: switch ID returned
2480  * @counter_id: VEB counter ID returned
2481  *
2482  * free switch resources (SWID and VEB counter) (0x0209)
2483  *
2484  * NOTE: This function frees multiple resources. It continues
2485  * releasing other resources even after it encounters error.
2486  * The error code returned is the last error it encountered.
2487  */
2488 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2489 {
2490         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2491         enum ice_status status, ret_status;
2492         u16 buf_len;
2493
2494         buf_len = ice_struct_size(sw_buf, elem, 1);
2495         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2496         if (!sw_buf)
2497                 return ICE_ERR_NO_MEMORY;
2498
2499         /* Prepare buffer to free for switch ID res.
2500          * The number of resource entries in buffer is passed as 1 since only a
2501          * single switch/VEB instance is freed, and hence a single sw_id
2502          * is released.
2503          */
2504         sw_buf->num_elems = CPU_TO_LE16(1);
2505         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2506         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2507
2508         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2509                                            ice_aqc_opc_free_res, NULL);
2510
2511         if (ret_status)
2512                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2513
2514         /* Prepare buffer to free for VEB Counter resource */
2515         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2516                         ice_malloc(hw, buf_len);
2517         if (!counter_buf) {
2518                 ice_free(hw, sw_buf);
2519                 return ICE_ERR_NO_MEMORY;
2520         }
2521
2522         /* The number of resource entries in buffer is passed as 1 since only a
2523          * single switch/VEB instance is freed, and hence a single VEB counter
2524          * is released
2525          */
2526         counter_buf->num_elems = CPU_TO_LE16(1);
2527         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2528         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2529
2530         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2531                                        ice_aqc_opc_free_res, NULL);
2532         if (status) {
2533                 ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2534                 ret_status = status;
2535         }
2536
2537         ice_free(hw, counter_buf);
2538         ice_free(hw, sw_buf);
2539         return ret_status;
2540 }
2541
2542 /**
2543  * ice_aq_add_vsi
2544  * @hw: pointer to the HW struct
2545  * @vsi_ctx: pointer to a VSI context struct
2546  * @cd: pointer to command details structure or NULL
2547  *
2548  * Add a VSI context to the hardware (0x0210)
2549  */
2550 enum ice_status
2551 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2552                struct ice_sq_cd *cd)
2553 {
2554         struct ice_aqc_add_update_free_vsi_resp *res;
2555         struct ice_aqc_add_get_update_free_vsi *cmd;
2556         struct ice_aq_desc desc;
2557         enum ice_status status;
2558
2559         cmd = &desc.params.vsi_cmd;
2560         res = &desc.params.add_update_free_vsi_res;
2561
2562         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2563
2564         if (!vsi_ctx->alloc_from_pool)
2565                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2566                                            ICE_AQ_VSI_IS_VALID);
2567
2568         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2569
2570         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2571
2572         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2573                                  sizeof(vsi_ctx->info), cd);
2574
2575         if (!status) {
2576                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2577                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2578                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2579         }
2580
2581         return status;
2582 }
2583
2584 /**
2585  * ice_aq_free_vsi
2586  * @hw: pointer to the HW struct
2587  * @vsi_ctx: pointer to a VSI context struct
2588  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2589  * @cd: pointer to command details structure or NULL
2590  *
2591  * Free VSI context info from hardware (0x0213)
2592  */
2593 enum ice_status
2594 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2595                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
2596 {
2597         struct ice_aqc_add_update_free_vsi_resp *resp;
2598         struct ice_aqc_add_get_update_free_vsi *cmd;
2599         struct ice_aq_desc desc;
2600         enum ice_status status;
2601
2602         cmd = &desc.params.vsi_cmd;
2603         resp = &desc.params.add_update_free_vsi_res;
2604
2605         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2606
2607         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2608         if (keep_vsi_alloc)
2609                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2610
2611         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2612         if (!status) {
2613                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2614                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2615         }
2616
2617         return status;
2618 }
2619
2620 /**
2621  * ice_aq_update_vsi
2622  * @hw: pointer to the HW struct
2623  * @vsi_ctx: pointer to a VSI context struct
2624  * @cd: pointer to command details structure or NULL
2625  *
2626  * Update VSI context in the hardware (0x0211)
2627  */
2628 enum ice_status
2629 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2630                   struct ice_sq_cd *cd)
2631 {
2632         struct ice_aqc_add_update_free_vsi_resp *resp;
2633         struct ice_aqc_add_get_update_free_vsi *cmd;
2634         struct ice_aq_desc desc;
2635         enum ice_status status;
2636
2637         cmd = &desc.params.vsi_cmd;
2638         resp = &desc.params.add_update_free_vsi_res;
2639
2640         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2641
2642         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2643
2644         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2645
2646         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2647                                  sizeof(vsi_ctx->info), cd);
2648
2649         if (!status) {
2650                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2651                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2652         }
2653
2654         return status;
2655 }
2656
2657 /**
2658  * ice_is_vsi_valid - check whether the VSI is valid or not
2659  * @hw: pointer to the HW struct
2660  * @vsi_handle: VSI handle
2661  *
2662  * check whether the VSI is valid or not
2663  */
2664 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2665 {
2666         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2667 }
2668
2669 /**
2670  * ice_get_hw_vsi_num - return the HW VSI number
2671  * @hw: pointer to the HW struct
2672  * @vsi_handle: VSI handle
2673  *
2674  * return the HW VSI number
2675  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2676  */
2677 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2678 {
2679         return hw->vsi_ctx[vsi_handle]->vsi_num;
2680 }
2681
2682 /**
2683  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2684  * @hw: pointer to the HW struct
2685  * @vsi_handle: VSI handle
2686  *
2687  * return the VSI context entry for a given VSI handle
2688  */
2689 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2690 {
2691         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2692 }
2693
2694 /**
2695  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2696  * @hw: pointer to the HW struct
2697  * @vsi_handle: VSI handle
2698  * @vsi: VSI context pointer
2699  *
2700  * save the VSI context entry for a given VSI handle
2701  */
2702 static void
2703 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2704 {
2705         hw->vsi_ctx[vsi_handle] = vsi;
2706 }
2707
2708 /**
2709  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2710  * @hw: pointer to the HW struct
2711  * @vsi_handle: VSI handle
2712  */
2713 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2714 {
2715         struct ice_vsi_ctx *vsi;
2716         u8 i;
2717
2718         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2719         if (!vsi)
2720                 return;
2721         ice_for_each_traffic_class(i) {
2722                 if (vsi->lan_q_ctx[i]) {
2723                         ice_free(hw, vsi->lan_q_ctx[i]);
2724                         vsi->lan_q_ctx[i] = NULL;
2725                 }
2726         }
2727 }
2728
2729 /**
2730  * ice_clear_vsi_ctx - clear the VSI context entry
2731  * @hw: pointer to the HW struct
2732  * @vsi_handle: VSI handle
2733  *
2734  * clear the VSI context entry
2735  */
2736 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2737 {
2738         struct ice_vsi_ctx *vsi;
2739
2740         vsi = ice_get_vsi_ctx(hw, vsi_handle);
2741         if (vsi) {
2742                 ice_clear_vsi_q_ctx(hw, vsi_handle);
2743                 ice_free(hw, vsi);
2744                 hw->vsi_ctx[vsi_handle] = NULL;
2745         }
2746 }
2747
2748 /**
2749  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2750  * @hw: pointer to the HW struct
2751  */
2752 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2753 {
2754         u16 i;
2755
2756         for (i = 0; i < ICE_MAX_VSI; i++)
2757                 ice_clear_vsi_ctx(hw, i);
2758 }
2759
2760 /**
2761  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2762  * @hw: pointer to the HW struct
2763  * @vsi_handle: unique VSI handle provided by drivers
2764  * @vsi_ctx: pointer to a VSI context struct
2765  * @cd: pointer to command details structure or NULL
2766  *
2767  * Add a VSI context to the hardware also add it into the VSI handle list.
2768  * If this function gets called after reset for existing VSIs then update
2769  * with the new HW VSI number in the corresponding VSI handle list entry.
2770  */
2771 enum ice_status
2772 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2773             struct ice_sq_cd *cd)
2774 {
2775         struct ice_vsi_ctx *tmp_vsi_ctx;
2776         enum ice_status status;
2777
2778         if (vsi_handle >= ICE_MAX_VSI)
2779                 return ICE_ERR_PARAM;
2780         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2781         if (status)
2782                 return status;
2783         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2784         if (!tmp_vsi_ctx) {
2785                 /* Create a new VSI context */
2786                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
2787                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2788                 if (!tmp_vsi_ctx) {
2789                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2790                         return ICE_ERR_NO_MEMORY;
2791                 }
2792                 *tmp_vsi_ctx = *vsi_ctx;
2793
2794                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2795         } else {
2796                 /* update with new HW VSI num */
2797                 tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2798         }
2799
2800         return ICE_SUCCESS;
2801 }
2802
2803 /**
2804  * ice_free_vsi- free VSI context from hardware and VSI handle list
2805  * @hw: pointer to the HW struct
2806  * @vsi_handle: unique VSI handle
2807  * @vsi_ctx: pointer to a VSI context struct
2808  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2809  * @cd: pointer to command details structure or NULL
2810  *
2811  * Free VSI context info from hardware as well as from VSI handle list
2812  */
2813 enum ice_status
2814 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2815              bool keep_vsi_alloc, struct ice_sq_cd *cd)
2816 {
2817         enum ice_status status;
2818
2819         if (!ice_is_vsi_valid(hw, vsi_handle))
2820                 return ICE_ERR_PARAM;
2821         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2822         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2823         if (!status)
2824                 ice_clear_vsi_ctx(hw, vsi_handle);
2825         return status;
2826 }
2827
2828 /**
2829  * ice_update_vsi
2830  * @hw: pointer to the HW struct
2831  * @vsi_handle: unique VSI handle
2832  * @vsi_ctx: pointer to a VSI context struct
2833  * @cd: pointer to command details structure or NULL
2834  *
2835  * Update VSI context in the hardware
2836  */
2837 enum ice_status
2838 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2839                struct ice_sq_cd *cd)
2840 {
2841         if (!ice_is_vsi_valid(hw, vsi_handle))
2842                 return ICE_ERR_PARAM;
2843         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2844         return ice_aq_update_vsi(hw, vsi_ctx, cd);
2845 }
2846
2847 /**
2848  * ice_aq_get_vsi_params
2849  * @hw: pointer to the HW struct
2850  * @vsi_ctx: pointer to a VSI context struct
2851  * @cd: pointer to command details structure or NULL
2852  *
2853  * Get VSI context info from hardware (0x0212)
2854  */
2855 enum ice_status
2856 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2857                       struct ice_sq_cd *cd)
2858 {
2859         struct ice_aqc_add_get_update_free_vsi *cmd;
2860         struct ice_aqc_get_vsi_resp *resp;
2861         struct ice_aq_desc desc;
2862         enum ice_status status;
2863
2864         cmd = &desc.params.vsi_cmd;
2865         resp = &desc.params.get_vsi_resp;
2866
2867         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2868
2869         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2870
2871         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2872                                  sizeof(vsi_ctx->info), cd);
2873         if (!status) {
2874                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2875                                         ICE_AQ_VSI_NUM_M;
2876                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2877                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2878         }
2879
2880         return status;
2881 }
2882
2883 /**
2884  * ice_aq_add_update_mir_rule - add/update a mirror rule
2885  * @hw: pointer to the HW struct
2886  * @rule_type: Rule Type
2887  * @dest_vsi: VSI number to which packets will be mirrored
2888  * @count: length of the list
2889  * @mr_buf: buffer for list of mirrored VSI numbers
2890  * @cd: pointer to command details structure or NULL
2891  * @rule_id: Rule ID
2892  *
2893  * Add/Update Mirror Rule (0x260).
2894  */
2895 enum ice_status
2896 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2897                            u16 count, struct ice_mir_rule_buf *mr_buf,
2898                            struct ice_sq_cd *cd, u16 *rule_id)
2899 {
2900         struct ice_aqc_add_update_mir_rule *cmd;
2901         struct ice_aq_desc desc;
2902         enum ice_status status;
2903         __le16 *mr_list = NULL;
2904         u16 buf_size = 0;
2905
2906         switch (rule_type) {
2907         case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2908         case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2909                 /* Make sure count and mr_buf are set for these rule_types */
2910                 if (!(count && mr_buf))
2911                         return ICE_ERR_PARAM;
2912
2913                 buf_size = count * sizeof(__le16);
2914                 mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2915                 if (!mr_list)
2916                         return ICE_ERR_NO_MEMORY;
2917                 break;
2918         case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2919         case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2920                 /* Make sure count and mr_buf are not set for these
2921                  * rule_types
2922                  */
2923                 if (count || mr_buf)
2924                         return ICE_ERR_PARAM;
2925                 break;
2926         default:
2927                 ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2928                 return ICE_ERR_OUT_OF_RANGE;
2929         }
2930
2931         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2932
2933         /* Pre-process 'mr_buf' items for add/update of virtual port
2934          * ingress/egress mirroring (but not physical port ingress/egress
2935          * mirroring)
2936          */
2937         if (mr_buf) {
2938                 int i;
2939
2940                 for (i = 0; i < count; i++) {
2941                         u16 id;
2942
2943                         id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2944
2945                         /* Validate specified VSI number, make sure it is less
2946                          * than ICE_MAX_VSI, if not return with error.
2947                          */
2948                         if (id >= ICE_MAX_VSI) {
2949                                 ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2950                                           id);
2951                                 ice_free(hw, mr_list);
2952                                 return ICE_ERR_OUT_OF_RANGE;
2953                         }
2954
2955                         /* add VSI to mirror rule */
2956                         if (mr_buf[i].add)
2957                                 mr_list[i] =
2958                                         CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2959                         else /* remove VSI from mirror rule */
2960                                 mr_list[i] = CPU_TO_LE16(id);
2961                 }
2962         }
2963
2964         cmd = &desc.params.add_update_rule;
2965         if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2966                 cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2967                                            ICE_AQC_RULE_ID_VALID_M);
2968         cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2969         cmd->num_entries = CPU_TO_LE16(count);
2970         cmd->dest = CPU_TO_LE16(dest_vsi);
2971
2972         status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2973         if (!status)
2974                 *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2975
2976         ice_free(hw, mr_list);
2977
2978         return status;
2979 }
2980
2981 /**
2982  * ice_aq_delete_mir_rule - delete a mirror rule
2983  * @hw: pointer to the HW struct
2984  * @rule_id: Mirror rule ID (to be deleted)
2985  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2986  *               otherwise it is returned to the shared pool
2987  * @cd: pointer to command details structure or NULL
2988  *
2989  * Delete Mirror Rule (0x261).
2990  */
2991 enum ice_status
2992 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2993                        struct ice_sq_cd *cd)
2994 {
2995         struct ice_aqc_delete_mir_rule *cmd;
2996         struct ice_aq_desc desc;
2997
2998         /* rule_id should be in the range 0...63 */
2999         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3000                 return ICE_ERR_OUT_OF_RANGE;
3001
3002         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3003
3004         cmd = &desc.params.del_rule;
3005         rule_id |= ICE_AQC_RULE_ID_VALID_M;
3006         cmd->rule_id = CPU_TO_LE16(rule_id);
3007
3008         if (keep_allocd)
3009                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3010
3011         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3012 }
3013
3014 /**
3015  * ice_aq_alloc_free_vsi_list
3016  * @hw: pointer to the HW struct
3017  * @vsi_list_id: VSI list ID returned or used for lookup
3018  * @lkup_type: switch rule filter lookup type
3019  * @opc: switch rules population command type - pass in the command opcode
3020  *
3021  * allocates or free a VSI list resource
3022  */
3023 static enum ice_status
3024 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3025                            enum ice_sw_lkup_type lkup_type,
3026                            enum ice_adminq_opc opc)
3027 {
3028         struct ice_aqc_alloc_free_res_elem *sw_buf;
3029         struct ice_aqc_res_elem *vsi_ele;
3030         enum ice_status status;
3031         u16 buf_len;
3032
3033         buf_len = ice_struct_size(sw_buf, elem, 1);
3034         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3035         if (!sw_buf)
3036                 return ICE_ERR_NO_MEMORY;
3037         sw_buf->num_elems = CPU_TO_LE16(1);
3038
3039         if (lkup_type == ICE_SW_LKUP_MAC ||
3040             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3041             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3042             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3043             lkup_type == ICE_SW_LKUP_PROMISC ||
3044             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3045             lkup_type == ICE_SW_LKUP_LAST) {
3046                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3047         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
3048                 sw_buf->res_type =
3049                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3050         } else {
3051                 status = ICE_ERR_PARAM;
3052                 goto ice_aq_alloc_free_vsi_list_exit;
3053         }
3054
3055         if (opc == ice_aqc_opc_free_res)
3056                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3057
3058         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3059         if (status)
3060                 goto ice_aq_alloc_free_vsi_list_exit;
3061
3062         if (opc == ice_aqc_opc_alloc_res) {
3063                 vsi_ele = &sw_buf->elem[0];
3064                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3065         }
3066
3067 ice_aq_alloc_free_vsi_list_exit:
3068         ice_free(hw, sw_buf);
3069         return status;
3070 }
3071
3072 /**
3073  * ice_aq_set_storm_ctrl - Sets storm control configuration
3074  * @hw: pointer to the HW struct
3075  * @bcast_thresh: represents the upper threshold for broadcast storm control
3076  * @mcast_thresh: represents the upper threshold for multicast storm control
3077  * @ctl_bitmask: storm control knobs
3078  *
3079  * Sets the storm control configuration (0x0280)
3080  */
3081 enum ice_status
3082 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3083                       u32 ctl_bitmask)
3084 {
3085         struct ice_aqc_storm_cfg *cmd;
3086         struct ice_aq_desc desc;
3087
3088         cmd = &desc.params.storm_conf;
3089
3090         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3091
3092         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3093         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3094         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3095
3096         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3097 }
3098
3099 /**
3100  * ice_aq_get_storm_ctrl - gets storm control configuration
3101  * @hw: pointer to the HW struct
3102  * @bcast_thresh: represents the upper threshold for broadcast storm control
3103  * @mcast_thresh: represents the upper threshold for multicast storm control
3104  * @ctl_bitmask: storm control knobs
3105  *
3106  * Gets the storm control configuration (0x0281)
3107  */
3108 enum ice_status
3109 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3110                       u32 *ctl_bitmask)
3111 {
3112         enum ice_status status;
3113         struct ice_aq_desc desc;
3114
3115         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3116
3117         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3118         if (!status) {
3119                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3120
3121                 if (bcast_thresh)
3122                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3123                                 ICE_AQ_THRESHOLD_M;
3124                 if (mcast_thresh)
3125                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3126                                 ICE_AQ_THRESHOLD_M;
3127                 if (ctl_bitmask)
3128                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3129         }
3130
3131         return status;
3132 }
3133
3134 /**
3135  * ice_aq_sw_rules - add/update/remove switch rules
3136  * @hw: pointer to the HW struct
3137  * @rule_list: pointer to switch rule population list
3138  * @rule_list_sz: total size of the rule list in bytes
3139  * @num_rules: number of switch rules in the rule_list
3140  * @opc: switch rules population command type - pass in the command opcode
3141  * @cd: pointer to command details structure or NULL
3142  *
3143  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3144  */
3145 static enum ice_status
3146 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3147                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3148 {
3149         struct ice_aq_desc desc;
3150         enum ice_status status;
3151
3152         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3153
3154         if (opc != ice_aqc_opc_add_sw_rules &&
3155             opc != ice_aqc_opc_update_sw_rules &&
3156             opc != ice_aqc_opc_remove_sw_rules)
3157                 return ICE_ERR_PARAM;
3158
3159         ice_fill_dflt_direct_cmd_desc(&desc, opc);
3160
3161         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3162         desc.params.sw_rules.num_rules_fltr_entry_index =
3163                 CPU_TO_LE16(num_rules);
3164         status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3165         if (opc != ice_aqc_opc_add_sw_rules &&
3166             hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3167                 status = ICE_ERR_DOES_NOT_EXIST;
3168
3169         return status;
3170 }
3171
3172 /**
3173  * ice_aq_add_recipe - add switch recipe
3174  * @hw: pointer to the HW struct
3175  * @s_recipe_list: pointer to switch rule population list
3176  * @num_recipes: number of switch recipes in the list
3177  * @cd: pointer to command details structure or NULL
3178  *
3179  * Add(0x0290)
3180  */
3181 enum ice_status
3182 ice_aq_add_recipe(struct ice_hw *hw,
3183                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3184                   u16 num_recipes, struct ice_sq_cd *cd)
3185 {
3186         struct ice_aqc_add_get_recipe *cmd;
3187         struct ice_aq_desc desc;
3188         u16 buf_size;
3189
3190         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3191         cmd = &desc.params.add_get_recipe;
3192         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3193
3194         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3195         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3196
3197         buf_size = num_recipes * sizeof(*s_recipe_list);
3198
3199         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3200 }
3201
3202 /**
3203  * ice_aq_get_recipe - get switch recipe
3204  * @hw: pointer to the HW struct
3205  * @s_recipe_list: pointer to switch rule population list
3206  * @num_recipes: pointer to the number of recipes (input and output)
3207  * @recipe_root: root recipe number of recipe(s) to retrieve
3208  * @cd: pointer to command details structure or NULL
3209  *
3210  * Get(0x0292)
3211  *
3212  * On input, *num_recipes should equal the number of entries in s_recipe_list.
3213  * On output, *num_recipes will equal the number of entries returned in
3214  * s_recipe_list.
3215  *
3216  * The caller must supply enough space in s_recipe_list to hold all possible
3217  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3218  */
3219 enum ice_status
3220 ice_aq_get_recipe(struct ice_hw *hw,
3221                   struct ice_aqc_recipe_data_elem *s_recipe_list,
3222                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3223 {
3224         struct ice_aqc_add_get_recipe *cmd;
3225         struct ice_aq_desc desc;
3226         enum ice_status status;
3227         u16 buf_size;
3228
3229         if (*num_recipes != ICE_MAX_NUM_RECIPES)
3230                 return ICE_ERR_PARAM;
3231
3232         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3233         cmd = &desc.params.add_get_recipe;
3234         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3235
3236         cmd->return_index = CPU_TO_LE16(recipe_root);
3237         cmd->num_sub_recipes = 0;
3238
3239         buf_size = *num_recipes * sizeof(*s_recipe_list);
3240
3241         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3242         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3243
3244         return status;
3245 }
3246
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
enum ice_status
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	enum ice_status status;

	/* ice_aq_get_recipe() demands room for ICE_MAX_NUM_RECIPES entries */
	rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
	if (!rcp_list)
		return ICE_ERR_NO_MEMORY;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 *
	 * NOTE(review): params->lkup_idx indexes lkup_indx[]/mask[] with no
	 * bounds check here - presumably callers guarantee it is within the
	 * recipe content arrays; verify against callers.
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			CPU_TO_LE16(params->mask);

	/* flag this lookup word as "ignored" when requested */
	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write the single modified recipe back (add acts as update here) */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	ice_free(hw, rcp_list);
	return status;
}
3304
3305 /**
3306  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3307  * @hw: pointer to the HW struct
3308  * @profile_id: package profile ID to associate the recipe with
3309  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3310  * @cd: pointer to command details structure or NULL
3311  * Recipe to profile association (0x0291)
3312  */
3313 enum ice_status
3314 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3315                              struct ice_sq_cd *cd)
3316 {
3317         struct ice_aqc_recipe_to_profile *cmd;
3318         struct ice_aq_desc desc;
3319
3320         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3321         cmd = &desc.params.recipe_to_profile;
3322         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3323         cmd->profile_id = CPU_TO_LE16(profile_id);
3324         /* Set the recipe ID bit in the bitmask to let the device know which
3325          * profile we are associating the recipe to
3326          */
3327         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3328                    ICE_NONDMA_TO_NONDMA);
3329
3330         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3331 }
3332
3333 /**
3334  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3335  * @hw: pointer to the HW struct
3336  * @profile_id: package profile ID to associate the recipe with
3337  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3338  * @cd: pointer to command details structure or NULL
3339  * Associate profile ID with given recipe (0x0293)
3340  */
3341 enum ice_status
3342 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3343                              struct ice_sq_cd *cd)
3344 {
3345         struct ice_aqc_recipe_to_profile *cmd;
3346         struct ice_aq_desc desc;
3347         enum ice_status status;
3348
3349         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3350         cmd = &desc.params.recipe_to_profile;
3351         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3352         cmd->profile_id = CPU_TO_LE16(profile_id);
3353
3354         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3355         if (!status)
3356                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
3357                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3358
3359         return status;
3360 }
3361
3362 /**
3363  * ice_alloc_recipe - add recipe resource
3364  * @hw: pointer to the hardware structure
3365  * @rid: recipe ID returned as response to AQ call
3366  */
3367 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3368 {
3369         struct ice_aqc_alloc_free_res_elem *sw_buf;
3370         enum ice_status status;
3371         u16 buf_len;
3372
3373         buf_len = ice_struct_size(sw_buf, elem, 1);
3374         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3375         if (!sw_buf)
3376                 return ICE_ERR_NO_MEMORY;
3377
3378         sw_buf->num_elems = CPU_TO_LE16(1);
3379         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3380                                         ICE_AQC_RES_TYPE_S) |
3381                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
3382         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3383                                        ice_aqc_opc_alloc_res, NULL);
3384         if (!status)
3385                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3386         ice_free(hw, sw_buf);
3387
3388         return status;
3389 }
3390
3391 /* ice_init_port_info - Initialize port_info with switch configuration data
3392  * @pi: pointer to port_info
3393  * @vsi_port_num: VSI number or port number
3394  * @type: Type of switch element (port or VSI)
3395  * @swid: switch ID of the switch the element is attached to
3396  * @pf_vf_num: PF or VF number
3397  * @is_vf: true if the element is a VF, false otherwise
3398  */
3399 static void
3400 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3401                    u16 swid, u16 pf_vf_num, bool is_vf)
3402 {
3403         switch (type) {
3404         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3405                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3406                 pi->sw_id = swid;
3407                 pi->pf_vf_num = pf_vf_num;
3408                 pi->is_vf = is_vf;
3409                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3410                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3411                 break;
3412         default:
3413                 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3414                 break;
3415         }
3416 }
3417
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the switch configuration from firmware (possibly over several AQ
 * calls) and initializes hw->port_info for each physical/virtual port element
 * found. Returns ICE_ERR_NO_MEMORY if the response buffer cannot be
 * allocated, ICE_ERR_CFG if more ports are reported than expected, or the
 * status of the last ice_aq_get_sw_cfg call.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	enum ice_status status;
	u8 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u8 j = 0; /* count of ports initialized so far */
	u16 i;

	/* this function expects exactly one port element */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			/* vsi_port_num packs both the element number (low
			 * bits) and the element type (high bits, extracted
			 * into res_type below)
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			switch (res_type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				/* reject elements beyond the expected count */
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, res_type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* non-port elements are ignored */
				break;
			}
		}
	} while (req_desc && !status);

out:
	ice_free(hw, rbuf);
	return status;
}
3496
3497 /**
3498  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3499  * @hw: pointer to the hardware structure
3500  * @fi: filter info structure to fill/update
3501  *
3502  * This helper function populates the lb_en and lan_en elements of the provided
3503  * ice_fltr_info struct using the switch's type and characteristics of the
3504  * switch rule being configured.
3505  */
3506 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3507 {
3508         if ((fi->flag & ICE_FLTR_RX) &&
3509             (fi->fltr_act == ICE_FWD_TO_VSI ||
3510              fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3511             fi->lkup_type == ICE_SW_LKUP_LAST)
3512                 fi->lan_en = true;
3513         fi->lb_en = false;
3514         fi->lan_en = false;
3515         if ((fi->flag & ICE_FLTR_TX) &&
3516             (fi->fltr_act == ICE_FWD_TO_VSI ||
3517              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3518              fi->fltr_act == ICE_FWD_TO_Q ||
3519              fi->fltr_act == ICE_FWD_TO_QGRP)) {
3520                 /* Setting LB for prune actions will result in replicated
3521                  * packets to the internal switch that will be dropped.
3522                  */
3523                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3524                         fi->lb_en = true;
3525
3526                 /* Set lan_en to TRUE if
3527                  * 1. The switch is a VEB AND
3528                  * 2
3529                  * 2.1 The lookup is a directional lookup like ethertype,
3530                  * promiscuous, ethertype-MAC, promiscuous-VLAN
3531                  * and default-port OR
3532                  * 2.2 The lookup is VLAN, OR
3533                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3534                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3535                  *
3536                  * OR
3537                  *
3538                  * The switch is a VEPA.
3539                  *
3540                  * In all other cases, the LAN enable has to be set to false.
3541                  */
3542                 if (hw->evb_veb) {
3543                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3544                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3545                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3546                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3547                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
3548                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
3549                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
3550                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3551                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3552                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3553                                 fi->lan_en = true;
3554                 } else {
3555                         fi->lan_en = true;
3556                 }
3557         }
3558 }
3559
3560 /**
3561  * ice_fill_sw_rule - Helper function to fill switch rule structure
3562  * @hw: pointer to the hardware structure
3563  * @f_info: entry containing packet forwarding information
3564  * @s_rule: switch rule structure to be filled in based on mac_entry
3565  * @opc: switch rules population command type - pass in the command opcode
3566  */
3567 static void
3568 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3569                  struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3570 {
3571         u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3572         u16 vlan_tpid = ICE_ETH_P_8021Q;
3573         void *daddr = NULL;
3574         u16 eth_hdr_sz;
3575         u8 *eth_hdr;
3576         u32 act = 0;
3577         __be16 *off;
3578         u8 q_rgn;
3579
3580         if (opc == ice_aqc_opc_remove_sw_rules) {
3581                 s_rule->pdata.lkup_tx_rx.act = 0;
3582                 s_rule->pdata.lkup_tx_rx.index =
3583                         CPU_TO_LE16(f_info->fltr_rule_id);
3584                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3585                 return;
3586         }
3587
3588         eth_hdr_sz = sizeof(dummy_eth_header);
3589         eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3590
3591         /* initialize the ether header with a dummy header */
3592         ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3593         ice_fill_sw_info(hw, f_info);
3594
3595         switch (f_info->fltr_act) {
3596         case ICE_FWD_TO_VSI:
3597                 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3598                         ICE_SINGLE_ACT_VSI_ID_M;
3599                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3600                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3601                                 ICE_SINGLE_ACT_VALID_BIT;
3602                 break;
3603         case ICE_FWD_TO_VSI_LIST:
3604                 act |= ICE_SINGLE_ACT_VSI_LIST;
3605                 act |= (f_info->fwd_id.vsi_list_id <<
3606                         ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3607                         ICE_SINGLE_ACT_VSI_LIST_ID_M;
3608                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3609                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3610                                 ICE_SINGLE_ACT_VALID_BIT;
3611                 break;
3612         case ICE_FWD_TO_Q:
3613                 act |= ICE_SINGLE_ACT_TO_Q;
3614                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3615                         ICE_SINGLE_ACT_Q_INDEX_M;
3616                 break;
3617         case ICE_DROP_PACKET:
3618                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3619                         ICE_SINGLE_ACT_VALID_BIT;
3620                 break;
3621         case ICE_FWD_TO_QGRP:
3622                 q_rgn = f_info->qgrp_size > 0 ?
3623                         (u8)ice_ilog2(f_info->qgrp_size) : 0;
3624                 act |= ICE_SINGLE_ACT_TO_Q;
3625                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3626                         ICE_SINGLE_ACT_Q_INDEX_M;
3627                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3628                         ICE_SINGLE_ACT_Q_REGION_M;
3629                 break;
3630         default:
3631                 return;
3632         }
3633
3634         if (f_info->lb_en)
3635                 act |= ICE_SINGLE_ACT_LB_ENABLE;
3636         if (f_info->lan_en)
3637                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
3638
3639         switch (f_info->lkup_type) {
3640         case ICE_SW_LKUP_MAC:
3641                 daddr = f_info->l_data.mac.mac_addr;
3642                 break;
3643         case ICE_SW_LKUP_VLAN:
3644                 vlan_id = f_info->l_data.vlan.vlan_id;
3645                 if (f_info->l_data.vlan.tpid_valid)
3646                         vlan_tpid = f_info->l_data.vlan.tpid;
3647                 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3648                     f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3649                         act |= ICE_SINGLE_ACT_PRUNE;
3650                         act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3651                 }
3652                 break;
3653         case ICE_SW_LKUP_ETHERTYPE_MAC:
3654                 daddr = f_info->l_data.ethertype_mac.mac_addr;
3655                 /* fall-through */
3656         case ICE_SW_LKUP_ETHERTYPE:
3657                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3658                 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3659                 break;
3660         case ICE_SW_LKUP_MAC_VLAN:
3661                 daddr = f_info->l_data.mac_vlan.mac_addr;
3662                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3663                 break;
3664         case ICE_SW_LKUP_PROMISC_VLAN:
3665                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
3666                 /* fall-through */
3667         case ICE_SW_LKUP_PROMISC:
3668                 daddr = f_info->l_data.mac_vlan.mac_addr;
3669                 break;
3670         default:
3671                 break;
3672         }
3673
3674         s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3675                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3676                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3677
3678         /* Recipe set depending on lookup type */
3679         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3680         s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3681         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3682
3683         if (daddr)
3684                 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3685                            ICE_NONDMA_TO_NONDMA);
3686
3687         if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3688                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3689                 *off = CPU_TO_BE16(vlan_id);
3690                 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3691                 *off = CPU_TO_BE16(vlan_tpid);
3692         }
3693
3694         /* Create the switch rule with the final dummy Ethernet header */
3695         if (opc != ice_aqc_opc_update_sw_rules)
3696                 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3697 }
3698
3699 /**
3700  * ice_add_marker_act
3701  * @hw: pointer to the hardware structure
3702  * @m_ent: the management entry for which sw marker needs to be added
3703  * @sw_marker: sw marker to tag the Rx descriptor with
3704  * @l_id: large action resource ID
3705  *
3706  * Create a large action to hold software marker and update the switch rule
3707  * entry pointed by m_ent with newly created large action
3708  */
3709 static enum ice_status
3710 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3711                    u16 sw_marker, u16 l_id)
3712 {
3713         struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3714         /* For software marker we need 3 large actions
3715          * 1. FWD action: FWD TO VSI or VSI LIST
3716          * 2. GENERIC VALUE action to hold the profile ID
3717          * 3. GENERIC VALUE action to hold the software marker ID
3718          */
3719         const u16 num_lg_acts = 3;
3720         enum ice_status status;
3721         u16 lg_act_size;
3722         u16 rules_size;
3723         u32 act;
3724         u16 id;
3725
3726         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3727                 return ICE_ERR_PARAM;
3728
3729         /* Create two back-to-back switch rules and submit them to the HW using
3730          * one memory buffer:
3731          *    1. Large Action
3732          *    2. Look up Tx Rx
3733          */
3734         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3735         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3736         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3737         if (!lg_act)
3738                 return ICE_ERR_NO_MEMORY;
3739
3740         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3741
3742         /* Fill in the first switch rule i.e. large action */
3743         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3744         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3745         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3746
3747         /* First action VSI forwarding or VSI list forwarding depending on how
3748          * many VSIs
3749          */
3750         id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3751                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3752
3753         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3754         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3755         if (m_ent->vsi_count > 1)
3756                 act |= ICE_LG_ACT_VSI_LIST;
3757         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3758
3759         /* Second action descriptor type */
3760         act = ICE_LG_ACT_GENERIC;
3761
3762         act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3763         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3764
3765         act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3766                ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3767
3768         /* Third action Marker value */
3769         act |= ICE_LG_ACT_GENERIC;
3770         act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3771                 ICE_LG_ACT_GENERIC_VALUE_M;
3772
3773         lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3774
3775         /* call the fill switch rule to fill the lookup Tx Rx structure */
3776         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3777                          ice_aqc_opc_update_sw_rules);
3778
3779         /* Update the action to point to the large action ID */
3780         rx_tx->pdata.lkup_tx_rx.act =
3781                 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3782                             ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3783                              ICE_SINGLE_ACT_PTR_VAL_M));
3784
3785         /* Use the filter rule ID of the previously created rule with single
3786          * act. Once the update happens, hardware will treat this as large
3787          * action
3788          */
3789         rx_tx->pdata.lkup_tx_rx.index =
3790                 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3791
3792         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3793                                  ice_aqc_opc_update_sw_rules, NULL);
3794         if (!status) {
3795                 m_ent->lg_act_idx = l_id;
3796                 m_ent->sw_marker_id = sw_marker;
3797         }
3798
3799         ice_free(hw, lg_act);
3800         return status;
3801 }
3802
3803 /**
3804  * ice_add_counter_act - add/update filter rule with counter action
3805  * @hw: pointer to the hardware structure
3806  * @m_ent: the management entry for which counter needs to be added
3807  * @counter_id: VLAN counter ID returned as part of allocate resource
3808  * @l_id: large action resource ID
3809  */
3810 static enum ice_status
3811 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3812                     u16 counter_id, u16 l_id)
3813 {
3814         struct ice_aqc_sw_rules_elem *lg_act;
3815         struct ice_aqc_sw_rules_elem *rx_tx;
3816         enum ice_status status;
3817         /* 2 actions will be added while adding a large action counter */
3818         const int num_acts = 2;
3819         u16 lg_act_size;
3820         u16 rules_size;
3821         u16 f_rule_id;
3822         u32 act;
3823         u16 id;
3824
3825         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3826                 return ICE_ERR_PARAM;
3827
3828         /* Create two back-to-back switch rules and submit them to the HW using
3829          * one memory buffer:
3830          * 1. Large Action
3831          * 2. Look up Tx Rx
3832          */
3833         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3834         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3835         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3836         if (!lg_act)
3837                 return ICE_ERR_NO_MEMORY;
3838
3839         rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3840
3841         /* Fill in the first switch rule i.e. large action */
3842         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3843         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3844         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3845
3846         /* First action VSI forwarding or VSI list forwarding depending on how
3847          * many VSIs
3848          */
3849         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3850                 m_ent->fltr_info.fwd_id.hw_vsi_id;
3851
3852         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3853         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3854                 ICE_LG_ACT_VSI_LIST_ID_M;
3855         if (m_ent->vsi_count > 1)
3856                 act |= ICE_LG_ACT_VSI_LIST;
3857         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3858
3859         /* Second action counter ID */
3860         act = ICE_LG_ACT_STAT_COUNT;
3861         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3862                 ICE_LG_ACT_STAT_COUNT_M;
3863         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3864
3865         /* call the fill switch rule to fill the lookup Tx Rx structure */
3866         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3867                          ice_aqc_opc_update_sw_rules);
3868
3869         act = ICE_SINGLE_ACT_PTR;
3870         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3871         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3872
3873         /* Use the filter rule ID of the previously created rule with single
3874          * act. Once the update happens, hardware will treat this as large
3875          * action
3876          */
3877         f_rule_id = m_ent->fltr_info.fltr_rule_id;
3878         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3879
3880         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3881                                  ice_aqc_opc_update_sw_rules, NULL);
3882         if (!status) {
3883                 m_ent->lg_act_idx = l_id;
3884                 m_ent->counter_index = counter_id;
3885         }
3886
3887         ice_free(hw, lg_act);
3888         return status;
3889 }
3890
3891 /**
3892  * ice_create_vsi_list_map
3893  * @hw: pointer to the hardware structure
3894  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3895  * @num_vsi: number of VSI handles in the array
3896  * @vsi_list_id: VSI list ID generated as part of allocate resource
3897  *
3898  * Helper function to create a new entry of VSI list ID to VSI mapping
3899  * using the given VSI list ID
3900  */
3901 static struct ice_vsi_list_map_info *
3902 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3903                         u16 vsi_list_id)
3904 {
3905         struct ice_switch_info *sw = hw->switch_info;
3906         struct ice_vsi_list_map_info *v_map;
3907         int i;
3908
3909         v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3910         if (!v_map)
3911                 return NULL;
3912
3913         v_map->vsi_list_id = vsi_list_id;
3914         v_map->ref_cnt = 1;
3915         for (i = 0; i < num_vsi; i++)
3916                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3917
3918         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3919         return v_map;
3920 }
3921
3922 /**
3923  * ice_update_vsi_list_rule
3924  * @hw: pointer to the hardware structure
3925  * @vsi_handle_arr: array of VSI handles to form a VSI list
3926  * @num_vsi: number of VSI handles in the array
3927  * @vsi_list_id: VSI list ID generated as part of allocate resource
3928  * @remove: Boolean value to indicate if this is a remove action
3929  * @opc: switch rules population command type - pass in the command opcode
3930  * @lkup_type: lookup type of the filter
3931  *
3932  * Call AQ command to add a new switch rule or update existing switch rule
3933  * using the given VSI list ID
3934  */
3935 static enum ice_status
3936 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3937                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3938                          enum ice_sw_lkup_type lkup_type)
3939 {
3940         struct ice_aqc_sw_rules_elem *s_rule;
3941         enum ice_status status;
3942         u16 s_rule_size;
3943         u16 rule_type;
3944         int i;
3945
3946         if (!num_vsi)
3947                 return ICE_ERR_PARAM;
3948
3949         if (lkup_type == ICE_SW_LKUP_MAC ||
3950             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3951             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3952             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3953             lkup_type == ICE_SW_LKUP_PROMISC ||
3954             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3955             lkup_type == ICE_SW_LKUP_LAST)
3956                 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3957                         ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3958         else if (lkup_type == ICE_SW_LKUP_VLAN)
3959                 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3960                         ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3961         else
3962                 return ICE_ERR_PARAM;
3963
3964         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3965         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3966         if (!s_rule)
3967                 return ICE_ERR_NO_MEMORY;
3968         for (i = 0; i < num_vsi; i++) {
3969                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3970                         status = ICE_ERR_PARAM;
3971                         goto exit;
3972                 }
3973                 /* AQ call requires hw_vsi_id(s) */
3974                 s_rule->pdata.vsi_list.vsi[i] =
3975                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3976         }
3977
3978         s_rule->type = CPU_TO_LE16(rule_type);
3979         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3980         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3981
3982         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3983
3984 exit:
3985         ice_free(hw, s_rule);
3986         return status;
3987 }
3988
3989 /**
3990  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3991  * @hw: pointer to the HW struct
3992  * @vsi_handle_arr: array of VSI handles to form a VSI list
3993  * @num_vsi: number of VSI handles in the array
3994  * @vsi_list_id: stores the ID of the VSI list to be created
3995  * @lkup_type: switch rule filter's lookup type
3996  */
3997 static enum ice_status
3998 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3999                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4000 {
4001         enum ice_status status;
4002
4003         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4004                                             ice_aqc_opc_alloc_res);
4005         if (status)
4006                 return status;
4007
4008         /* Update the newly created VSI list to include the specified VSIs */
4009         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4010                                         *vsi_list_id, false,
4011                                         ice_aqc_opc_add_sw_rules, lkup_type);
4012 }
4013
4014 /**
4015  * ice_create_pkt_fwd_rule
4016  * @hw: pointer to the hardware structure
4017  * @recp_list: corresponding filter management list
4018  * @f_entry: entry containing packet forwarding information
4019  *
4020  * Create switch rule with given filter information and add an entry
4021  * to the corresponding filter management list to track this switch rule
4022  * and VSI mapping
4023  */
4024 static enum ice_status
4025 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4026                         struct ice_fltr_list_entry *f_entry)
4027 {
4028         struct ice_fltr_mgmt_list_entry *fm_entry;
4029         struct ice_aqc_sw_rules_elem *s_rule;
4030         enum ice_status status;
4031
4032         s_rule = (struct ice_aqc_sw_rules_elem *)
4033                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4034         if (!s_rule)
4035                 return ICE_ERR_NO_MEMORY;
4036         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4037                    ice_malloc(hw, sizeof(*fm_entry));
4038         if (!fm_entry) {
4039                 status = ICE_ERR_NO_MEMORY;
4040                 goto ice_create_pkt_fwd_rule_exit;
4041         }
4042
4043         fm_entry->fltr_info = f_entry->fltr_info;
4044
4045         /* Initialize all the fields for the management entry */
4046         fm_entry->vsi_count = 1;
4047         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4048         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4049         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4050
4051         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4052                          ice_aqc_opc_add_sw_rules);
4053
4054         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4055                                  ice_aqc_opc_add_sw_rules, NULL);
4056         if (status) {
4057                 ice_free(hw, fm_entry);
4058                 goto ice_create_pkt_fwd_rule_exit;
4059         }
4060
4061         f_entry->fltr_info.fltr_rule_id =
4062                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4063         fm_entry->fltr_info.fltr_rule_id =
4064                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4065
4066         /* The book keeping entries will get removed when base driver
4067          * calls remove filter AQ command
4068          */
4069         LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4070
4071 ice_create_pkt_fwd_rule_exit:
4072         ice_free(hw, s_rule);
4073         return status;
4074 }
4075
4076 /**
4077  * ice_update_pkt_fwd_rule
4078  * @hw: pointer to the hardware structure
4079  * @f_info: filter information for switch rule
4080  *
4081  * Call AQ command to update a previously created switch rule with a
4082  * VSI list ID
4083  */
4084 static enum ice_status
4085 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4086 {
4087         struct ice_aqc_sw_rules_elem *s_rule;
4088         enum ice_status status;
4089
4090         s_rule = (struct ice_aqc_sw_rules_elem *)
4091                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4092         if (!s_rule)
4093                 return ICE_ERR_NO_MEMORY;
4094
4095         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4096
4097         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4098
4099         /* Update switch rule with new rule set to forward VSI list */
4100         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4101                                  ice_aqc_opc_update_sw_rules, NULL);
4102
4103         ice_free(hw, s_rule);
4104         return status;
4105 }
4106
4107 /**
4108  * ice_update_sw_rule_bridge_mode
4109  * @hw: pointer to the HW struct
4110  *
4111  * Updates unicast switch filter rules based on VEB/VEPA mode
4112  */
4113 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4114 {
4115         struct ice_switch_info *sw = hw->switch_info;
4116         struct ice_fltr_mgmt_list_entry *fm_entry;
4117         enum ice_status status = ICE_SUCCESS;
4118         struct LIST_HEAD_TYPE *rule_head;
4119         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4120
4121         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4122         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4123
4124         ice_acquire_lock(rule_lock);
4125         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4126                             list_entry) {
4127                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
4128                 u8 *addr = fi->l_data.mac.mac_addr;
4129
4130                 /* Update unicast Tx rules to reflect the selected
4131                  * VEB/VEPA mode
4132                  */
4133                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4134                     (fi->fltr_act == ICE_FWD_TO_VSI ||
4135                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4136                      fi->fltr_act == ICE_FWD_TO_Q ||
4137                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
4138                         status = ice_update_pkt_fwd_rule(hw, fi);
4139                         if (status)
4140                                 break;
4141                 }
4142         }
4143
4144         ice_release_lock(rule_lock);
4145
4146         return status;
4147 }
4148
4149 /**
4150  * ice_add_update_vsi_list
4151  * @hw: pointer to the hardware structure
4152  * @m_entry: pointer to current filter management list entry
4153  * @cur_fltr: filter information from the book keeping entry
4154  * @new_fltr: filter information with the new VSI to be added
4155  *
4156  * Call AQ command to add or update previously created VSI list with new VSI.
4157  *
4158  * Helper function to do book keeping associated with adding filter information
4159  * The algorithm to do the book keeping is described below :
4160  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4161  *      if only one VSI has been added till now
4162  *              Allocate a new VSI list and add two VSIs
4163  *              to this list using switch rule command
4164  *              Update the previously created switch rule with the
4165  *              newly created VSI list ID
4166  *      if a VSI list was previously created
4167  *              Add the new VSI to the previously created VSI list set
4168  *              using the update switch rule command
4169  */
4170 static enum ice_status
4171 ice_add_update_vsi_list(struct ice_hw *hw,
4172                         struct ice_fltr_mgmt_list_entry *m_entry,
4173                         struct ice_fltr_info *cur_fltr,
4174                         struct ice_fltr_info *new_fltr)
4175 {
4176         enum ice_status status = ICE_SUCCESS;
4177         u16 vsi_list_id = 0;
4178
4179         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4180              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4181                 return ICE_ERR_NOT_IMPL;
4182
4183         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4184              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4185             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4186              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4187                 return ICE_ERR_NOT_IMPL;
4188
4189         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4190                 /* Only one entry existed in the mapping and it was not already
4191                  * a part of a VSI list. So, create a VSI list with the old and
4192                  * new VSIs.
4193                  */
4194                 struct ice_fltr_info tmp_fltr;
4195                 u16 vsi_handle_arr[2];
4196
4197                 /* A rule already exists with the new VSI being added */
4198                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4199                         return ICE_ERR_ALREADY_EXISTS;
4200
4201                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
4202                 vsi_handle_arr[1] = new_fltr->vsi_handle;
4203                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4204                                                   &vsi_list_id,
4205                                                   new_fltr->lkup_type);
4206                 if (status)
4207                         return status;
4208
4209                 tmp_fltr = *new_fltr;
4210                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4211                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4212                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4213                 /* Update the previous switch rule of "MAC forward to VSI" to
4214                  * "MAC fwd to VSI list"
4215                  */
4216                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4217                 if (status)
4218                         return status;
4219
4220                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4221                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4222                 m_entry->vsi_list_info =
4223                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4224                                                 vsi_list_id);
4225
4226                 if (!m_entry->vsi_list_info)
4227                         return ICE_ERR_NO_MEMORY;
4228
4229                 /* If this entry was large action then the large action needs
4230                  * to be updated to point to FWD to VSI list
4231                  */
4232                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4233                         status =
4234                             ice_add_marker_act(hw, m_entry,
4235                                                m_entry->sw_marker_id,
4236                                                m_entry->lg_act_idx);
4237         } else {
4238                 u16 vsi_handle = new_fltr->vsi_handle;
4239                 enum ice_adminq_opc opcode;
4240
4241                 if (!m_entry->vsi_list_info)
4242                         return ICE_ERR_CFG;
4243
4244                 /* A rule already exists with the new VSI being added */
4245                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4246                         return ICE_SUCCESS;
4247
4248                 /* Update the previously created VSI list set with
4249                  * the new VSI ID passed in
4250                  */
4251                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4252                 opcode = ice_aqc_opc_update_sw_rules;
4253
4254                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4255                                                   vsi_list_id, false, opcode,
4256                                                   new_fltr->lkup_type);
4257                 /* update VSI list mapping info with new VSI ID */
4258                 if (!status)
4259                         ice_set_bit(vsi_handle,
4260                                     m_entry->vsi_list_info->vsi_map);
4261         }
4262         if (!status)
4263                 m_entry->vsi_count++;
4264         return status;
4265 }
4266
4267 /**
4268  * ice_find_rule_entry - Search a rule entry
4269  * @list_head: head of rule list
4270  * @f_info: rule information
4271  *
4272  * Helper function to search for a given rule entry
4273  * Returns pointer to entry storing the rule if found
4274  */
4275 static struct ice_fltr_mgmt_list_entry *
4276 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4277                     struct ice_fltr_info *f_info)
4278 {
4279         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4280
4281         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4282                             list_entry) {
4283                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4284                             sizeof(f_info->l_data)) &&
4285                     f_info->flag == list_itr->fltr_info.flag) {
4286                         ret = list_itr;
4287                         break;
4288                 }
4289         }
4290         return ret;
4291 }
4292
4293 /**
4294  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4295  * @recp_list: VSI lists needs to be searched
4296  * @vsi_handle: VSI handle to be found in VSI list
4297  * @vsi_list_id: VSI list ID found containing vsi_handle
4298  *
4299  * Helper function to search a VSI list with single entry containing given VSI
4300  * handle element. This can be extended further to search VSI list with more
4301  * than 1 vsi_count. Returns pointer to VSI list entry if found.
4302  */
4303 static struct ice_vsi_list_map_info *
4304 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4305                         u16 *vsi_list_id)
4306 {
4307         struct ice_vsi_list_map_info *map_info = NULL;
4308         struct LIST_HEAD_TYPE *list_head;
4309
4310         list_head = &recp_list->filt_rules;
4311         if (recp_list->adv_rule) {
4312                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
4313
4314                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4315                                     ice_adv_fltr_mgmt_list_entry,
4316                                     list_entry) {
4317                         if (list_itr->vsi_list_info) {
4318                                 map_info = list_itr->vsi_list_info;
4319                                 if (ice_is_bit_set(map_info->vsi_map,
4320                                                    vsi_handle)) {
4321                                         *vsi_list_id = map_info->vsi_list_id;
4322                                         return map_info;
4323                                 }
4324                         }
4325                 }
4326         } else {
4327                 struct ice_fltr_mgmt_list_entry *list_itr;
4328
4329                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
4330                                     ice_fltr_mgmt_list_entry,
4331                                     list_entry) {
4332                         if (list_itr->vsi_count == 1 &&
4333                             list_itr->vsi_list_info) {
4334                                 map_info = list_itr->vsi_list_info;
4335                                 if (ice_is_bit_set(map_info->vsi_map,
4336                                                    vsi_handle)) {
4337                                         *vsi_list_id = map_info->vsi_list_id;
4338                                         return map_info;
4339                                 }
4340                         }
4341                 }
4342         }
4343         return NULL;
4344 }
4345
4346 /**
4347  * ice_add_rule_internal - add rule for a given lookup type
4348  * @hw: pointer to the hardware structure
4349  * @recp_list: recipe list for which rule has to be added
4350  * @lport: logic port number on which function add rule
4351  * @f_entry: structure containing MAC forwarding information
4352  *
4353  * Adds or updates the rule lists for a given recipe
4354  */
4355 static enum ice_status
4356 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4357                       u8 lport, struct ice_fltr_list_entry *f_entry)
4358 {
4359         struct ice_fltr_info *new_fltr, *cur_fltr;
4360         struct ice_fltr_mgmt_list_entry *m_entry;
4361         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4362         enum ice_status status = ICE_SUCCESS;
4363
4364         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4365                 return ICE_ERR_PARAM;
4366
4367         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4368         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4369                 f_entry->fltr_info.fwd_id.hw_vsi_id =
4370                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4371
4372         rule_lock = &recp_list->filt_rule_lock;
4373
4374         ice_acquire_lock(rule_lock);
4375         new_fltr = &f_entry->fltr_info;
4376         if (new_fltr->flag & ICE_FLTR_RX)
4377                 new_fltr->src = lport;
4378         else if (new_fltr->flag & ICE_FLTR_TX)
4379                 new_fltr->src =
4380                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4381
4382         m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4383         if (!m_entry) {
4384                 status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4385                 goto exit_add_rule_internal;
4386         }
4387
4388         cur_fltr = &m_entry->fltr_info;
4389         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4390
4391 exit_add_rule_internal:
4392         ice_release_lock(rule_lock);
4393         return status;
4394 }
4395
4396 /**
4397  * ice_remove_vsi_list_rule
4398  * @hw: pointer to the hardware structure
4399  * @vsi_list_id: VSI list ID generated as part of allocate resource
4400  * @lkup_type: switch rule filter lookup type
4401  *
4402  * The VSI list should be emptied before this function is called to remove the
4403  * VSI list.
4404  */
4405 static enum ice_status
4406 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4407                          enum ice_sw_lkup_type lkup_type)
4408 {
4409         /* Free the vsi_list resource that we allocated. It is assumed that the
4410          * list is empty at this point.
4411          */
4412         return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4413                                             ice_aqc_opc_free_res);
4414 }
4415
4416 /**
4417  * ice_rem_update_vsi_list
4418  * @hw: pointer to the hardware structure
4419  * @vsi_handle: VSI handle of the VSI to remove
4420  * @fm_list: filter management entry for which the VSI list management needs to
4421  *           be done
4422  */
4423 static enum ice_status
4424 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4425                         struct ice_fltr_mgmt_list_entry *fm_list)
4426 {
4427         enum ice_sw_lkup_type lkup_type;
4428         enum ice_status status = ICE_SUCCESS;
4429         u16 vsi_list_id;
4430
4431         if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4432             fm_list->vsi_count == 0)
4433                 return ICE_ERR_PARAM;
4434
4435         /* A rule with the VSI being removed does not exist */
4436         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4437                 return ICE_ERR_DOES_NOT_EXIST;
4438
4439         lkup_type = fm_list->fltr_info.lkup_type;
4440         vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4441         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4442                                           ice_aqc_opc_update_sw_rules,
4443                                           lkup_type);
4444         if (status)
4445                 return status;
4446
4447         fm_list->vsi_count--;
4448         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4449
4450         if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4451                 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4452                 struct ice_vsi_list_map_info *vsi_list_info =
4453                         fm_list->vsi_list_info;
4454                 u16 rem_vsi_handle;
4455
4456                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4457                                                     ICE_MAX_VSI);
4458                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4459                         return ICE_ERR_OUT_OF_RANGE;
4460
4461                 /* Make sure VSI list is empty before removing it below */
4462                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4463                                                   vsi_list_id, true,
4464                                                   ice_aqc_opc_update_sw_rules,
4465                                                   lkup_type);
4466                 if (status)
4467                         return status;
4468
4469                 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4470                 tmp_fltr_info.fwd_id.hw_vsi_id =
4471                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
4472                 tmp_fltr_info.vsi_handle = rem_vsi_handle;
4473                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4474                 if (status) {
4475                         ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4476                                   tmp_fltr_info.fwd_id.hw_vsi_id, status);
4477                         return status;
4478                 }
4479
4480                 fm_list->fltr_info = tmp_fltr_info;
4481         }
4482
4483         if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4484             (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4485                 struct ice_vsi_list_map_info *vsi_list_info =
4486                         fm_list->vsi_list_info;
4487
4488                 /* Remove the VSI list since it is no longer used */
4489                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4490                 if (status) {
4491                         ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4492                                   vsi_list_id, status);
4493                         return status;
4494                 }
4495
4496                 LIST_DEL(&vsi_list_info->list_entry);
4497                 ice_free(hw, vsi_list_info);
4498                 fm_list->vsi_list_info = NULL;
4499         }
4500
4501         return status;
4502 }
4503
4504 /**
4505  * ice_remove_rule_internal - Remove a filter rule of a given type
4506  *
4507  * @hw: pointer to the hardware structure
4508  * @recp_list: recipe list for which the rule needs to removed
4509  * @f_entry: rule entry containing filter information
4510  */
4511 static enum ice_status
4512 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4513                          struct ice_fltr_list_entry *f_entry)
4514 {
4515         struct ice_fltr_mgmt_list_entry *list_elem;
4516         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4517         enum ice_status status = ICE_SUCCESS;
4518         bool remove_rule = false;
4519         u16 vsi_handle;
4520
4521         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4522                 return ICE_ERR_PARAM;
4523         f_entry->fltr_info.fwd_id.hw_vsi_id =
4524                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4525
4526         rule_lock = &recp_list->filt_rule_lock;
4527         ice_acquire_lock(rule_lock);
4528         list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4529                                         &f_entry->fltr_info);
4530         if (!list_elem) {
4531                 status = ICE_ERR_DOES_NOT_EXIST;
4532                 goto exit;
4533         }
4534
4535         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4536                 remove_rule = true;
4537         } else if (!list_elem->vsi_list_info) {
4538                 status = ICE_ERR_DOES_NOT_EXIST;
4539                 goto exit;
4540         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
4541                 /* a ref_cnt > 1 indicates that the vsi_list is being
4542                  * shared by multiple rules. Decrement the ref_cnt and
4543                  * remove this rule, but do not modify the list, as it
4544                  * is in-use by other rules.
4545                  */
4546                 list_elem->vsi_list_info->ref_cnt--;
4547                 remove_rule = true;
4548         } else {
4549                 /* a ref_cnt of 1 indicates the vsi_list is only used
4550                  * by one rule. However, the original removal request is only
4551                  * for a single VSI. Update the vsi_list first, and only
4552                  * remove the rule if there are no further VSIs in this list.
4553                  */
4554                 vsi_handle = f_entry->fltr_info.vsi_handle;
4555                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4556                 if (status)
4557                         goto exit;
4558                 /* if VSI count goes to zero after updating the VSI list */
4559                 if (list_elem->vsi_count == 0)
4560                         remove_rule = true;
4561         }
4562
4563         if (remove_rule) {
4564                 /* Remove the lookup rule */
4565                 struct ice_aqc_sw_rules_elem *s_rule;
4566
4567                 s_rule = (struct ice_aqc_sw_rules_elem *)
4568                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4569                 if (!s_rule) {
4570                         status = ICE_ERR_NO_MEMORY;
4571                         goto exit;
4572                 }
4573
4574                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4575                                  ice_aqc_opc_remove_sw_rules);
4576
4577                 status = ice_aq_sw_rules(hw, s_rule,
4578                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4579                                          ice_aqc_opc_remove_sw_rules, NULL);
4580
4581                 /* Remove a book keeping from the list */
4582                 ice_free(hw, s_rule);
4583
4584                 if (status)
4585                         goto exit;
4586
4587                 LIST_DEL(&list_elem->list_entry);
4588                 ice_free(hw, list_elem);
4589         }
4590 exit:
4591         ice_release_lock(rule_lock);
4592         return status;
4593 }
4594
4595 /**
4596  * ice_aq_get_res_alloc - get allocated resources
4597  * @hw: pointer to the HW struct
4598  * @num_entries: pointer to u16 to store the number of resource entries returned
4599  * @buf: pointer to buffer
4600  * @buf_size: size of buf
4601  * @cd: pointer to command details structure or NULL
4602  *
4603  * The caller-supplied buffer must be large enough to store the resource
4604  * information for all resource types. Each resource type is an
4605  * ice_aqc_get_res_resp_elem structure.
4606  */
4607 enum ice_status
4608 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4609                      struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4610                      struct ice_sq_cd *cd)
4611 {
4612         struct ice_aqc_get_res_alloc *resp;
4613         enum ice_status status;
4614         struct ice_aq_desc desc;
4615
4616         if (!buf)
4617                 return ICE_ERR_BAD_PTR;
4618
4619         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4620                 return ICE_ERR_INVAL_SIZE;
4621
4622         resp = &desc.params.get_res;
4623
4624         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4625         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4626
4627         if (!status && num_entries)
4628                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
4629
4630         return status;
4631 }
4632
4633 /**
4634  * ice_aq_get_res_descs - get allocated resource descriptors
4635  * @hw: pointer to the hardware structure
4636  * @num_entries: number of resource entries in buffer
4637  * @buf: structure to hold response data buffer
4638  * @buf_size: size of buffer
4639  * @res_type: resource type
4640  * @res_shared: is resource shared
4641  * @desc_id: input - first desc ID to start; output - next desc ID
4642  * @cd: pointer to command details structure or NULL
4643  */
4644 enum ice_status
4645 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4646                      struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4647                      bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4648 {
4649         struct ice_aqc_get_allocd_res_desc *cmd;
4650         struct ice_aq_desc desc;
4651         enum ice_status status;
4652
4653         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4654
4655         cmd = &desc.params.get_res_desc;
4656
4657         if (!buf)
4658                 return ICE_ERR_PARAM;
4659
4660         if (buf_size != (num_entries * sizeof(*buf)))
4661                 return ICE_ERR_PARAM;
4662
4663         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4664
4665         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4666                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
4667                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4668         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4669
4670         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4671         if (!status)
4672                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4673
4674         return status;
4675 }
4676
4677 /**
4678  * ice_add_mac_rule - Add a MAC address based filter rule
4679  * @hw: pointer to the hardware structure
4680  * @m_list: list of MAC addresses and forwarding information
4681  * @sw: pointer to switch info struct for which function add rule
4682  * @lport: logic port number on which function add rule
4683  *
4684  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4685  * multiple unicast addresses, the function assumes that all the
4686  * addresses are unique in a given add_mac call. It doesn't
4687  * check for duplicates in this case, removing duplicates from a given
4688  * list should be taken care of in the caller of this function.
4689  */
4690 static enum ice_status
4691 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4692                  struct ice_switch_info *sw, u8 lport)
4693 {
4694         struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4695         struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4696         struct ice_fltr_list_entry *m_list_itr;
4697         struct LIST_HEAD_TYPE *rule_head;
4698         u16 total_elem_left, s_rule_size;
4699         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4700         enum ice_status status = ICE_SUCCESS;
4701         u16 num_unicast = 0;
4702         u8 elem_sent;
4703
4704         s_rule = NULL;
4705         rule_lock = &recp_list->filt_rule_lock;
4706         rule_head = &recp_list->filt_rules;
4707
4708         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4709                             list_entry) {
4710                 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4711                 u16 vsi_handle;
4712                 u16 hw_vsi_id;
4713
4714                 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4715                 vsi_handle = m_list_itr->fltr_info.vsi_handle;
4716                 if (!ice_is_vsi_valid(hw, vsi_handle))
4717                         return ICE_ERR_PARAM;
4718                 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4719                 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4720                 /* update the src in case it is VSI num */
4721                 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4722                         return ICE_ERR_PARAM;
4723                 m_list_itr->fltr_info.src = hw_vsi_id;
4724                 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4725                     IS_ZERO_ETHER_ADDR(add))
4726                         return ICE_ERR_PARAM;
4727                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4728                         /* Don't overwrite the unicast address */
4729                         ice_acquire_lock(rule_lock);
4730                         if (ice_find_rule_entry(rule_head,
4731                                                 &m_list_itr->fltr_info)) {
4732                                 ice_release_lock(rule_lock);
4733                                 return ICE_ERR_ALREADY_EXISTS;
4734                         }
4735                         ice_release_lock(rule_lock);
4736                         num_unicast++;
4737                 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
4738                            (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4739                         m_list_itr->status =
4740                                 ice_add_rule_internal(hw, recp_list, lport,
4741                                                       m_list_itr);
4742                         if (m_list_itr->status)
4743                                 return m_list_itr->status;
4744                 }
4745         }
4746
4747         ice_acquire_lock(rule_lock);
4748         /* Exit if no suitable entries were found for adding bulk switch rule */
4749         if (!num_unicast) {
4750                 status = ICE_SUCCESS;
4751                 goto ice_add_mac_exit;
4752         }
4753
4754         /* Allocate switch rule buffer for the bulk update for unicast */
4755         s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4756         s_rule = (struct ice_aqc_sw_rules_elem *)
4757                 ice_calloc(hw, num_unicast, s_rule_size);
4758         if (!s_rule) {
4759                 status = ICE_ERR_NO_MEMORY;
4760                 goto ice_add_mac_exit;
4761         }
4762
4763         r_iter = s_rule;
4764         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4765                             list_entry) {
4766                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4767                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4768
4769                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4770                         ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4771                                          ice_aqc_opc_add_sw_rules);
4772                         r_iter = (struct ice_aqc_sw_rules_elem *)
4773                                 ((u8 *)r_iter + s_rule_size);
4774                 }
4775         }
4776
4777         /* Call AQ bulk switch rule update for all unicast addresses */
4778         r_iter = s_rule;
4779         /* Call AQ switch rule in AQ_MAX chunk */
4780         for (total_elem_left = num_unicast; total_elem_left > 0;
4781              total_elem_left -= elem_sent) {
4782                 struct ice_aqc_sw_rules_elem *entry = r_iter;
4783
4784                 elem_sent = MIN_T(u8, total_elem_left,
4785                                   (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4786                 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4787                                          elem_sent, ice_aqc_opc_add_sw_rules,
4788                                          NULL);
4789                 if (status)
4790                         goto ice_add_mac_exit;
4791                 r_iter = (struct ice_aqc_sw_rules_elem *)
4792                         ((u8 *)r_iter + (elem_sent * s_rule_size));
4793         }
4794
4795         /* Fill up rule ID based on the value returned from FW */
4796         r_iter = s_rule;
4797         LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4798                             list_entry) {
4799                 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4800                 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4801                 struct ice_fltr_mgmt_list_entry *fm_entry;
4802
4803                 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4804                         f_info->fltr_rule_id =
4805                                 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4806                         f_info->fltr_act = ICE_FWD_TO_VSI;
4807                         /* Create an entry to track this MAC address */
4808                         fm_entry = (struct ice_fltr_mgmt_list_entry *)
4809                                 ice_malloc(hw, sizeof(*fm_entry));
4810                         if (!fm_entry) {
4811                                 status = ICE_ERR_NO_MEMORY;
4812                                 goto ice_add_mac_exit;
4813                         }
4814                         fm_entry->fltr_info = *f_info;
4815                         fm_entry->vsi_count = 1;
4816                         /* The book keeping entries will get removed when
4817                          * base driver calls remove filter AQ command
4818                          */
4819
4820                         LIST_ADD(&fm_entry->list_entry, rule_head);
4821                         r_iter = (struct ice_aqc_sw_rules_elem *)
4822                                 ((u8 *)r_iter + s_rule_size);
4823                 }
4824         }
4825
4826 ice_add_mac_exit:
4827         ice_release_lock(rule_lock);
4828         if (s_rule)
4829                 ice_free(hw, s_rule);
4830         return status;
4831 }
4832
4833 /**
4834  * ice_add_mac - Add a MAC address based filter rule
4835  * @hw: pointer to the hardware structure
4836  * @m_list: list of MAC addresses and forwarding information
4837  *
4838  * Function add MAC rule for logical port from HW struct
4839  */
4840 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4841 {
4842         if (!m_list || !hw)
4843                 return ICE_ERR_PARAM;
4844
4845         return ice_add_mac_rule(hw, m_list, hw->switch_info,
4846                                 hw->port_info->lport);
4847 }
4848
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @recp_list: recipe list for which rule has to be added
 * @f_entry: filter entry containing one VLAN information
 *
 * Adds (or extends) a VLAN switch rule under the recipe's filter-rule lock.
 * Three cases are handled:
 *   1. no rule exists yet for this VLAN - create a new forwarding rule,
 *      backed by a (possibly shared) VSI list;
 *   2. a rule exists and its VSI list has a single reference - add the new
 *      VSI to that list in place;
 *   3. a rule exists but its VSI list is shared - build a fresh two-entry
 *      VSI list and repoint the existing rule to it.
 *
 * Returns ICE_SUCCESS on success, otherwise an ICE_ERR_* code.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
		      struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	/* Resolve the software VSI handle to the HW VSI number up front */
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &recp_list->filt_rule_lock;
	/* Held until 'exit'; all list lookups/updates below are protected */
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(recp_list,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
		if (!status) {
			/* ice_create_pkt_fwd_rule added a tracking entry;
			 * re-look it up so the VSI list map can be attached
			 */
			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				/* NOTE(review): ice_create_vsi_list_map may
				 * return NULL on allocation failure and the
				 * result is not checked here - verify callers
				 * tolerate a NULL vsi_list_info
				 */
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		/* NOTE(review): assumes an existing VLAN rule always carries a
		 * non-NULL vsi_list_info - confirm against rule creation paths
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* the shared list holds exactly one VSI; find its handle */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
5002
5003 /**
5004  * ice_add_vlan_rule - Add VLAN based filter rule
5005  * @hw: pointer to the hardware structure
5006  * @v_list: list of VLAN entries and forwarding information
5007  * @sw: pointer to switch info struct for which function add rule
5008  */
5009 static enum ice_status
5010 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5011                   struct ice_switch_info *sw)
5012 {
5013         struct ice_fltr_list_entry *v_list_itr;
5014         struct ice_sw_recipe *recp_list;
5015
5016         recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5017         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5018                             list_entry) {
5019                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5020                         return ICE_ERR_PARAM;
5021                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5022                 v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5023                                                            v_list_itr);
5024                 if (v_list_itr->status)
5025                         return v_list_itr->status;
5026         }
5027         return ICE_SUCCESS;
5028 }
5029
5030 /**
5031  * ice_add_vlan - Add a VLAN based filter rule
5032  * @hw: pointer to the hardware structure
5033  * @v_list: list of VLAN and forwarding information
5034  *
5035  * Function add VLAN rule for logical port from HW struct
5036  */
5037 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5038 {
5039         if (!v_list || !hw)
5040                 return ICE_ERR_PARAM;
5041
5042         return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5043 }
5044
5045 /**
5046  * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5047  * @hw: pointer to the hardware structure
5048  * @mv_list: list of MAC and VLAN filters
5049  * @sw: pointer to switch info struct for which function add rule
5050  * @lport: logic port number on which function add rule
5051  *
5052  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5053  * pruning bits enabled, then it is the responsibility of the caller to make
5054  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5055  * VLAN won't be received on that VSI otherwise.
5056  */
5057 static enum ice_status
5058 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5059                       struct ice_switch_info *sw, u8 lport)
5060 {
5061         struct ice_fltr_list_entry *mv_list_itr;
5062         struct ice_sw_recipe *recp_list;
5063
5064         if (!mv_list || !hw)
5065                 return ICE_ERR_PARAM;
5066
5067         recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5068         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5069                             list_entry) {
5070                 enum ice_sw_lkup_type l_type =
5071                         mv_list_itr->fltr_info.lkup_type;
5072
5073                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5074                         return ICE_ERR_PARAM;
5075                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5076                 mv_list_itr->status =
5077                         ice_add_rule_internal(hw, recp_list, lport,
5078                                               mv_list_itr);
5079                 if (mv_list_itr->status)
5080                         return mv_list_itr->status;
5081         }
5082         return ICE_SUCCESS;
5083 }
5084
5085 /**
5086  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5087  * @hw: pointer to the hardware structure
5088  * @mv_list: list of MAC VLAN addresses and forwarding information
5089  *
5090  * Function add MAC VLAN rule for logical port from HW struct
5091  */
5092 enum ice_status
5093 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5094 {
5095         if (!mv_list || !hw)
5096                 return ICE_ERR_PARAM;
5097
5098         return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5099                                      hw->port_info->lport);
5100 }
5101
5102 /**
5103  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5104  * @hw: pointer to the hardware structure
5105  * @em_list: list of ether type MAC filter, MAC is optional
5106  * @sw: pointer to switch info struct for which function add rule
5107  * @lport: logic port number on which function add rule
5108  *
5109  * This function requires the caller to populate the entries in
5110  * the filter list with the necessary fields (including flags to
5111  * indicate Tx or Rx rules).
5112  */
5113 static enum ice_status
5114 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5115                      struct ice_switch_info *sw, u8 lport)
5116 {
5117         struct ice_fltr_list_entry *em_list_itr;
5118
5119         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5120                             list_entry) {
5121                 struct ice_sw_recipe *recp_list;
5122                 enum ice_sw_lkup_type l_type;
5123
5124                 l_type = em_list_itr->fltr_info.lkup_type;
5125                 recp_list = &sw->recp_list[l_type];
5126
5127                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5128                     l_type != ICE_SW_LKUP_ETHERTYPE)
5129                         return ICE_ERR_PARAM;
5130
5131                 em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5132                                                             lport,
5133                                                             em_list_itr);
5134                 if (em_list_itr->status)
5135                         return em_list_itr->status;
5136         }
5137         return ICE_SUCCESS;
5138 }
5139
5140 /**
5141  * ice_add_eth_mac - Add a ethertype based filter rule
5142  * @hw: pointer to the hardware structure
5143  * @em_list: list of ethertype and forwarding information
5144  *
5145  * Function add ethertype rule for logical port from HW struct
5146  */
5147 enum ice_status
5148 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5149 {
5150         if (!em_list || !hw)
5151                 return ICE_ERR_PARAM;
5152
5153         return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5154                                     hw->port_info->lport);
5155 }
5156
5157 /**
5158  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5159  * @hw: pointer to the hardware structure
5160  * @em_list: list of ethertype or ethertype MAC entries
5161  * @sw: pointer to switch info struct for which function add rule
5162  */
5163 static enum ice_status
5164 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5165                         struct ice_switch_info *sw)
5166 {
5167         struct ice_fltr_list_entry *em_list_itr, *tmp;
5168
5169         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5170                                  list_entry) {
5171                 struct ice_sw_recipe *recp_list;
5172                 enum ice_sw_lkup_type l_type;
5173
5174                 l_type = em_list_itr->fltr_info.lkup_type;
5175
5176                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5177                     l_type != ICE_SW_LKUP_ETHERTYPE)
5178                         return ICE_ERR_PARAM;
5179
5180                 recp_list = &sw->recp_list[l_type];
5181                 em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5182                                                                em_list_itr);
5183                 if (em_list_itr->status)
5184                         return em_list_itr->status;
5185         }
5186         return ICE_SUCCESS;
5187 }
5188
5189 /**
5190  * ice_remove_eth_mac - remove a ethertype based filter rule
5191  * @hw: pointer to the hardware structure
5192  * @em_list: list of ethertype and forwarding information
5193  *
5194  */
5195 enum ice_status
5196 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5197 {
5198         if (!em_list || !hw)
5199                 return ICE_ERR_PARAM;
5200
5201         return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5202 }
5203
5204 /**
5205  * ice_rem_sw_rule_info
5206  * @hw: pointer to the hardware structure
5207  * @rule_head: pointer to the switch list structure that we want to delete
5208  */
5209 static void
5210 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5211 {
5212         if (!LIST_EMPTY(rule_head)) {
5213                 struct ice_fltr_mgmt_list_entry *entry;
5214                 struct ice_fltr_mgmt_list_entry *tmp;
5215
5216                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5217                                          ice_fltr_mgmt_list_entry, list_entry) {
5218                         LIST_DEL(&entry->list_entry);
5219                         ice_free(hw, entry);
5220                 }
5221         }
5222 }
5223
5224 /**
5225  * ice_rem_adv_rule_info
5226  * @hw: pointer to the hardware structure
5227  * @rule_head: pointer to the switch list structure that we want to delete
5228  */
5229 static void
5230 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5231 {
5232         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5233         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5234
5235         if (LIST_EMPTY(rule_head))
5236                 return;
5237
5238         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5239                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
5240                 LIST_DEL(&lst_itr->list_entry);
5241                 ice_free(hw, lst_itr->lkups);
5242                 ice_free(hw, lst_itr);
5243         }
5244 }
5245
5246 /**
5247  * ice_rem_all_sw_rules_info
5248  * @hw: pointer to the hardware structure
5249  */
5250 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5251 {
5252         struct ice_switch_info *sw = hw->switch_info;
5253         u8 i;
5254
5255         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5256                 struct LIST_HEAD_TYPE *rule_head;
5257
5258                 rule_head = &sw->recp_list[i].filt_rules;
5259                 if (!sw->recp_list[i].adv_rule)
5260                         ice_rem_sw_rule_info(hw, rule_head);
5261                 else
5262                         ice_rem_adv_rule_info(hw, rule_head);
5263                 if (sw->recp_list[i].adv_rule &&
5264                     LIST_EMPTY(&sw->recp_list[i].filt_rules))
5265                         sw->recp_list[i].adv_rule = false;
5266         }
5267 }
5268
/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * On success the default-VSI bookkeeping in @pi (dflt_rx/tx_vsi_num and
 * dflt_rx/tx_vsi_rule_id) is updated to match the new state.
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
		 u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* an add rule carries a packet header; a remove rule only needs the
	 * rule index, hence the two different buffer sizes
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		/* Rx default rule: source is the logical port */
		f_info.src = pi->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal identifies the rule by its cached rule ID */
			f_info.fltr_rule_id =
				pi->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		/* Tx default rule: source is the VSI itself */
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				pi->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	/* skip the bookkeeping below on AQ failure or if @direction carried
	 * neither the Rx nor the Tx flag
	 */
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		/* cache the FW-assigned rule index so the rule can be
		 * referenced when it is later removed
		 */
		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = hw_vsi_id;
			pi->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = hw_vsi_id;
			pi->dflt_rx_vsi_rule_id = index;
		}
	} else {
		/* rule removed: invalidate the cached default-VSI state */
		if (f_info.flag & ICE_FLTR_TX) {
			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	ice_free(hw, s_rule);
	return status;
}
5357
5358 /**
5359  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5360  * @list_head: head of rule list
5361  * @f_info: rule information
5362  *
5363  * Helper function to search for a unicast rule entry - this is to be used
5364  * to remove unicast MAC filter that is not shared with other VSIs on the
5365  * PF switch.
5366  *
5367  * Returns pointer to entry storing the rule if found
5368  */
5369 static struct ice_fltr_mgmt_list_entry *
5370 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5371                           struct ice_fltr_info *f_info)
5372 {
5373         struct ice_fltr_mgmt_list_entry *list_itr;
5374
5375         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5376                             list_entry) {
5377                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5378                             sizeof(f_info->l_data)) &&
5379                     f_info->fwd_id.hw_vsi_id ==
5380                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
5381                     f_info->flag == list_itr->fltr_info.flag)
5382                         return list_itr;
5383         }
5384         return NULL;
5385 }
5386
5387 /**
5388  * ice_remove_mac_rule - remove a MAC based filter rule
5389  * @hw: pointer to the hardware structure
5390  * @m_list: list of MAC addresses and forwarding information
5391  * @recp_list: list from which function remove MAC address
5392  *
5393  * This function removes either a MAC filter rule or a specific VSI from a
5394  * VSI list for a multicast MAC address.
5395  *
5396  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5397  * ice_add_mac. Caller should be aware that this call will only work if all
5398  * the entries passed into m_list were added previously. It will not attempt to
5399  * do a partial remove of entries that were found.
5400  */
5401 static enum ice_status
5402 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5403                     struct ice_sw_recipe *recp_list)
5404 {
5405         struct ice_fltr_list_entry *list_itr, *tmp;
5406         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5407
5408         if (!m_list)
5409                 return ICE_ERR_PARAM;
5410
5411         rule_lock = &recp_list->filt_rule_lock;
5412         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5413                                  list_entry) {
5414                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5415                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5416                 u16 vsi_handle;
5417
5418                 if (l_type != ICE_SW_LKUP_MAC)
5419                         return ICE_ERR_PARAM;
5420
5421                 vsi_handle = list_itr->fltr_info.vsi_handle;
5422                 if (!ice_is_vsi_valid(hw, vsi_handle))
5423                         return ICE_ERR_PARAM;
5424
5425                 list_itr->fltr_info.fwd_id.hw_vsi_id =
5426                                         ice_get_hw_vsi_num(hw, vsi_handle);
5427                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
5428                         /* Don't remove the unicast address that belongs to
5429                          * another VSI on the switch, since it is not being
5430                          * shared...
5431                          */
5432                         ice_acquire_lock(rule_lock);
5433                         if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5434                                                        &list_itr->fltr_info)) {
5435                                 ice_release_lock(rule_lock);
5436                                 return ICE_ERR_DOES_NOT_EXIST;
5437                         }
5438                         ice_release_lock(rule_lock);
5439                 }
5440                 list_itr->status = ice_remove_rule_internal(hw, recp_list,
5441                                                             list_itr);
5442                 if (list_itr->status)
5443                         return list_itr->status;
5444         }
5445         return ICE_SUCCESS;
5446 }
5447
5448 /**
5449  * ice_remove_mac - remove a MAC address based filter rule
5450  * @hw: pointer to the hardware structure
5451  * @m_list: list of MAC addresses and forwarding information
5452  *
5453  */
5454 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5455 {
5456         struct ice_sw_recipe *recp_list;
5457
5458         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5459         return ice_remove_mac_rule(hw, m_list, recp_list);
5460 }
5461
5462 /**
5463  * ice_remove_vlan_rule - Remove VLAN based filter rule
5464  * @hw: pointer to the hardware structure
5465  * @v_list: list of VLAN entries and forwarding information
5466  * @recp_list: list from which function remove VLAN
5467  */
5468 static enum ice_status
5469 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5470                      struct ice_sw_recipe *recp_list)
5471 {
5472         struct ice_fltr_list_entry *v_list_itr, *tmp;
5473
5474         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5475                                  list_entry) {
5476                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5477
5478                 if (l_type != ICE_SW_LKUP_VLAN)
5479                         return ICE_ERR_PARAM;
5480                 v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5481                                                               v_list_itr);
5482                 if (v_list_itr->status)
5483                         return v_list_itr->status;
5484         }
5485         return ICE_SUCCESS;
5486 }
5487
5488 /**
5489  * ice_remove_vlan - remove a VLAN address based filter rule
5490  * @hw: pointer to the hardware structure
5491  * @v_list: list of VLAN and forwarding information
5492  *
5493  */
5494 enum ice_status
5495 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5496 {
5497         struct ice_sw_recipe *recp_list;
5498
5499         if (!v_list || !hw)
5500                 return ICE_ERR_PARAM;
5501
5502         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5503         return ice_remove_vlan_rule(hw, v_list, recp_list);
5504 }
5505
5506 /**
5507  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5508  * @hw: pointer to the hardware structure
5509  * @v_list: list of MAC VLAN entries and forwarding information
5510  * @recp_list: list from which function remove MAC VLAN
5511  */
5512 static enum ice_status
5513 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5514                          struct ice_sw_recipe *recp_list)
5515 {
5516         struct ice_fltr_list_entry *v_list_itr, *tmp;
5517
5518         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5519         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5520                                  list_entry) {
5521                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5522
5523                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
5524                         return ICE_ERR_PARAM;
5525                 v_list_itr->status =
5526                         ice_remove_rule_internal(hw, recp_list,
5527                                                  v_list_itr);
5528                 if (v_list_itr->status)
5529                         return v_list_itr->status;
5530         }
5531         return ICE_SUCCESS;
5532 }
5533
5534 /**
5535  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5536  * @hw: pointer to the hardware structure
5537  * @mv_list: list of MAC VLAN and forwarding information
5538  */
5539 enum ice_status
5540 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5541 {
5542         struct ice_sw_recipe *recp_list;
5543
5544         if (!mv_list || !hw)
5545                 return ICE_ERR_PARAM;
5546
5547         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5548         return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5549 }
5550
5551 /**
5552  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5553  * @fm_entry: filter entry to inspect
5554  * @vsi_handle: VSI handle to compare with filter info
5555  */
5556 static bool
5557 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5558 {
5559         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5560                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5561                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5562                  fm_entry->vsi_list_info &&
5563                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5564                                  vsi_handle))));
5565 }
5566
5567 /**
5568  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5569  * @hw: pointer to the hardware structure
5570  * @vsi_handle: VSI handle to remove filters from
5571  * @vsi_list_head: pointer to the list to add entry to
5572  * @fi: pointer to fltr_info of filter entry to copy & add
5573  *
5574  * Helper function, used when creating a list of filters to remove from
5575  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5576  * original filter entry, with the exception of fltr_info.fltr_act and
5577  * fltr_info.fwd_id fields. These are set such that later logic can
5578  * extract which VSI to remove the fltr from, and pass on that information.
5579  */
5580 static enum ice_status
5581 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5582                                struct LIST_HEAD_TYPE *vsi_list_head,
5583                                struct ice_fltr_info *fi)
5584 {
5585         struct ice_fltr_list_entry *tmp;
5586
5587         /* this memory is freed up in the caller function
5588          * once filters for this VSI are removed
5589          */
5590         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5591         if (!tmp)
5592                 return ICE_ERR_NO_MEMORY;
5593
5594         tmp->fltr_info = *fi;
5595
5596         /* Overwrite these fields to indicate which VSI to remove filter from,
5597          * so find and remove logic can extract the information from the
5598          * list entries. Note that original entries will still have proper
5599          * values.
5600          */
5601         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5602         tmp->fltr_info.vsi_handle = vsi_handle;
5603         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5604
5605         LIST_ADD(&tmp->list_entry, vsi_list_head);
5606
5607         return ICE_SUCCESS;
5608 }
5609
5610 /**
5611  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5612  * @hw: pointer to the hardware structure
5613  * @vsi_handle: VSI handle to remove filters from
5614  * @lkup_list_head: pointer to the list that has certain lookup type filters
5615  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5616  *
5617  * Locates all filters in lkup_list_head that are used by the given VSI,
5618  * and adds COPIES of those entries to vsi_list_head (intended to be used
5619  * to remove the listed filters).
5620  * Note that this means all entries in vsi_list_head must be explicitly
5621  * deallocated by the caller when done with list.
5622  */
5623 static enum ice_status
5624 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5625                          struct LIST_HEAD_TYPE *lkup_list_head,
5626                          struct LIST_HEAD_TYPE *vsi_list_head)
5627 {
5628         struct ice_fltr_mgmt_list_entry *fm_entry;
5629         enum ice_status status = ICE_SUCCESS;
5630
5631         /* check to make sure VSI ID is valid and within boundary */
5632         if (!ice_is_vsi_valid(hw, vsi_handle))
5633                 return ICE_ERR_PARAM;
5634
5635         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5636                             ice_fltr_mgmt_list_entry, list_entry) {
5637                 if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5638                         continue;
5639
5640                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5641                                                         vsi_list_head,
5642                                                         &fm_entry->fltr_info);
5643                 if (status)
5644                         return status;
5645         }
5646         return status;
5647 }
5648
5649 /**
5650  * ice_determine_promisc_mask
5651  * @fi: filter info to parse
5652  *
5653  * Helper function to determine which ICE_PROMISC_ mask corresponds
5654  * to given filter into.
5655  */
5656 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5657 {
5658         u16 vid = fi->l_data.mac_vlan.vlan_id;
5659         u8 *macaddr = fi->l_data.mac.mac_addr;
5660         bool is_tx_fltr = false;
5661         u8 promisc_mask = 0;
5662
5663         if (fi->flag == ICE_FLTR_TX)
5664                 is_tx_fltr = true;
5665
5666         if (IS_BROADCAST_ETHER_ADDR(macaddr))
5667                 promisc_mask |= is_tx_fltr ?
5668                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5669         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5670                 promisc_mask |= is_tx_fltr ?
5671                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5672         else if (IS_UNICAST_ETHER_ADDR(macaddr))
5673                 promisc_mask |= is_tx_fltr ?
5674                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5675         if (vid)
5676                 promisc_mask |= is_tx_fltr ?
5677                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5678
5679         return promisc_mask;
5680 }
5681
5682 /**
5683  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5684  * @hw: pointer to the hardware structure
5685  * @vsi_handle: VSI handle to retrieve info from
5686  * @promisc_mask: pointer to mask to be filled in
5687  * @vid: VLAN ID of promisc VLAN VSI
5688  * @sw: pointer to switch info struct for which function add rule
5689  */
5690 static enum ice_status
5691 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5692                      u16 *vid, struct ice_switch_info *sw)
5693 {
5694         struct ice_fltr_mgmt_list_entry *itr;
5695         struct LIST_HEAD_TYPE *rule_head;
5696         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5697
5698         if (!ice_is_vsi_valid(hw, vsi_handle))
5699                 return ICE_ERR_PARAM;
5700
5701         *vid = 0;
5702         *promisc_mask = 0;
5703         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5704         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5705
5706         ice_acquire_lock(rule_lock);
5707         LIST_FOR_EACH_ENTRY(itr, rule_head,
5708                             ice_fltr_mgmt_list_entry, list_entry) {
5709                 /* Continue if this filter doesn't apply to this VSI or the
5710                  * VSI ID is not in the VSI map for this filter
5711                  */
5712                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5713                         continue;
5714
5715                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5716         }
5717         ice_release_lock(rule_lock);
5718
5719         return ICE_SUCCESS;
5720 }
5721
5722 /**
5723  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5724  * @hw: pointer to the hardware structure
5725  * @vsi_handle: VSI handle to retrieve info from
5726  * @promisc_mask: pointer to mask to be filled in
5727  * @vid: VLAN ID of promisc VLAN VSI
5728  */
5729 enum ice_status
5730 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5731                     u16 *vid)
5732 {
5733         return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5734                                     vid, hw->switch_info);
5735 }
5736
5737 /**
5738  * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5739  * @hw: pointer to the hardware structure
5740  * @vsi_handle: VSI handle to retrieve info from
5741  * @promisc_mask: pointer to mask to be filled in
5742  * @vid: VLAN ID of promisc VLAN VSI
5743  * @sw: pointer to switch info struct for which function add rule
5744  */
5745 static enum ice_status
5746 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5747                           u16 *vid, struct ice_switch_info *sw)
5748 {
5749         struct ice_fltr_mgmt_list_entry *itr;
5750         struct LIST_HEAD_TYPE *rule_head;
5751         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5752
5753         if (!ice_is_vsi_valid(hw, vsi_handle))
5754                 return ICE_ERR_PARAM;
5755
5756         *vid = 0;
5757         *promisc_mask = 0;
5758         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5759         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5760
5761         ice_acquire_lock(rule_lock);
5762         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5763                             list_entry) {
5764                 /* Continue if this filter doesn't apply to this VSI or the
5765                  * VSI ID is not in the VSI map for this filter
5766                  */
5767                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5768                         continue;
5769
5770                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5771         }
5772         ice_release_lock(rule_lock);
5773
5774         return ICE_SUCCESS;
5775 }
5776
5777 /**
5778  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5779  * @hw: pointer to the hardware structure
5780  * @vsi_handle: VSI handle to retrieve info from
5781  * @promisc_mask: pointer to mask to be filled in
5782  * @vid: VLAN ID of promisc VLAN VSI
5783  */
5784 enum ice_status
5785 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5786                          u16 *vid)
5787 {
5788         return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5789                                          vid, hw->switch_info);
5790 }
5791
5792 /**
5793  * ice_remove_promisc - Remove promisc based filter rules
5794  * @hw: pointer to the hardware structure
5795  * @recp_id: recipe ID for which the rule needs to removed
5796  * @v_list: list of promisc entries
5797  */
5798 static enum ice_status
5799 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5800                    struct LIST_HEAD_TYPE *v_list)
5801 {
5802         struct ice_fltr_list_entry *v_list_itr, *tmp;
5803         struct ice_sw_recipe *recp_list;
5804
5805         recp_list = &hw->switch_info->recp_list[recp_id];
5806         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5807                                  list_entry) {
5808                 v_list_itr->status =
5809                         ice_remove_rule_internal(hw, recp_list, v_list_itr);
5810                 if (v_list_itr->status)
5811                         return v_list_itr->status;
5812         }
5813         return ICE_SUCCESS;
5814 }
5815
5816 /**
5817  * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5818  * @hw: pointer to the hardware structure
5819  * @vsi_handle: VSI handle to clear mode
5820  * @promisc_mask: mask of promiscuous config bits to clear
5821  * @vid: VLAN ID to clear VLAN promiscuous
5822  * @sw: pointer to switch info struct for which function add rule
5823  */
5824 static enum ice_status
5825 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5826                        u16 vid, struct ice_switch_info *sw)
5827 {
5828         struct ice_fltr_list_entry *fm_entry, *tmp;
5829         struct LIST_HEAD_TYPE remove_list_head;
5830         struct ice_fltr_mgmt_list_entry *itr;
5831         struct LIST_HEAD_TYPE *rule_head;
5832         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
5833         enum ice_status status = ICE_SUCCESS;
5834         u8 recipe_id;
5835
5836         if (!ice_is_vsi_valid(hw, vsi_handle))
5837                 return ICE_ERR_PARAM;
5838
5839         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5840                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5841         else
5842                 recipe_id = ICE_SW_LKUP_PROMISC;
5843
5844         rule_head = &sw->recp_list[recipe_id].filt_rules;
5845         rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5846
5847         INIT_LIST_HEAD(&remove_list_head);
5848
5849         ice_acquire_lock(rule_lock);
5850         LIST_FOR_EACH_ENTRY(itr, rule_head,
5851                             ice_fltr_mgmt_list_entry, list_entry) {
5852                 struct ice_fltr_info *fltr_info;
5853                 u8 fltr_promisc_mask = 0;
5854
5855                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
5856                         continue;
5857                 fltr_info = &itr->fltr_info;
5858
5859                 if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5860                     vid != fltr_info->l_data.mac_vlan.vlan_id)
5861                         continue;
5862
5863                 fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5864
5865                 /* Skip if filter is not completely specified by given mask */
5866                 if (fltr_promisc_mask & ~promisc_mask)
5867                         continue;
5868
5869                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5870                                                         &remove_list_head,
5871                                                         fltr_info);
5872                 if (status) {
5873                         ice_release_lock(rule_lock);
5874                         goto free_fltr_list;
5875                 }
5876         }
5877         ice_release_lock(rule_lock);
5878
5879         status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5880
5881 free_fltr_list:
5882         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5883                                  ice_fltr_list_entry, list_entry) {
5884                 LIST_DEL(&fm_entry->list_entry);
5885                 ice_free(hw, fm_entry);
5886         }
5887
5888         return status;
5889 }
5890
5891 /**
5892  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5893  * @hw: pointer to the hardware structure
5894  * @vsi_handle: VSI handle to clear mode
5895  * @promisc_mask: mask of promiscuous config bits to clear
5896  * @vid: VLAN ID to clear VLAN promiscuous
5897  */
5898 enum ice_status
5899 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5900                       u8 promisc_mask, u16 vid)
5901 {
5902         return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5903                                       vid, hw->switch_info);
5904 }
5905
5906 /**
5907  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5908  * @hw: pointer to the hardware structure
5909  * @vsi_handle: VSI handle to configure
5910  * @promisc_mask: mask of promiscuous config bits
5911  * @vid: VLAN ID to set VLAN promiscuous
5912  * @lport: logical port number to configure promisc mode
5913  * @sw: pointer to switch info struct for which function add rule
5914  */
5915 static enum ice_status
5916 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5917                      u16 vid, u8 lport, struct ice_switch_info *sw)
5918 {
5919         enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5920         struct ice_fltr_list_entry f_list_entry;
5921         struct ice_fltr_info new_fltr;
5922         enum ice_status status = ICE_SUCCESS;
5923         bool is_tx_fltr;
5924         u16 hw_vsi_id;
5925         int pkt_type;
5926         u8 recipe_id;
5927
5928         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5929
5930         if (!ice_is_vsi_valid(hw, vsi_handle))
5931                 return ICE_ERR_PARAM;
5932         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5933
5934         ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5935
5936         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5937                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5938                 new_fltr.l_data.mac_vlan.vlan_id = vid;
5939                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5940         } else {
5941                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5942                 recipe_id = ICE_SW_LKUP_PROMISC;
5943         }
5944
5945         /* Separate filters must be set for each direction/packet type
5946          * combination, so we will loop over the mask value, store the
5947          * individual type, and clear it out in the input mask as it
5948          * is found.
5949          */
5950         while (promisc_mask) {
5951                 struct ice_sw_recipe *recp_list;
5952                 u8 *mac_addr;
5953
5954                 pkt_type = 0;
5955                 is_tx_fltr = false;
5956
5957                 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5958                         promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5959                         pkt_type = UCAST_FLTR;
5960                 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5961                         promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5962                         pkt_type = UCAST_FLTR;
5963                         is_tx_fltr = true;
5964                 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5965                         promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5966                         pkt_type = MCAST_FLTR;
5967                 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5968                         promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5969                         pkt_type = MCAST_FLTR;
5970                         is_tx_fltr = true;
5971                 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5972                         promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5973                         pkt_type = BCAST_FLTR;
5974                 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5975                         promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5976                         pkt_type = BCAST_FLTR;
5977                         is_tx_fltr = true;
5978                 }
5979
5980                 /* Check for VLAN promiscuous flag */
5981                 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5982                         promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5983                 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5984                         promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5985                         is_tx_fltr = true;
5986                 }
5987
5988                 /* Set filter DA based on packet type */
5989                 mac_addr = new_fltr.l_data.mac.mac_addr;
5990                 if (pkt_type == BCAST_FLTR) {
5991                         ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5992                 } else if (pkt_type == MCAST_FLTR ||
5993                            pkt_type == UCAST_FLTR) {
5994                         /* Use the dummy ether header DA */
5995                         ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5996                                    ICE_NONDMA_TO_NONDMA);
5997                         if (pkt_type == MCAST_FLTR)
5998                                 mac_addr[0] |= 0x1;     /* Set multicast bit */
5999                 }
6000
6001                 /* Need to reset this to zero for all iterations */
6002                 new_fltr.flag = 0;
6003                 if (is_tx_fltr) {
6004                         new_fltr.flag |= ICE_FLTR_TX;
6005                         new_fltr.src = hw_vsi_id;
6006                 } else {
6007                         new_fltr.flag |= ICE_FLTR_RX;
6008                         new_fltr.src = lport;
6009                 }
6010
6011                 new_fltr.fltr_act = ICE_FWD_TO_VSI;
6012                 new_fltr.vsi_handle = vsi_handle;
6013                 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
6014                 f_list_entry.fltr_info = new_fltr;
6015                 recp_list = &sw->recp_list[recipe_id];
6016
6017                 status = ice_add_rule_internal(hw, recp_list, lport,
6018                                                &f_list_entry);
6019                 if (status != ICE_SUCCESS)
6020                         goto set_promisc_exit;
6021         }
6022
6023 set_promisc_exit:
6024         return status;
6025 }
6026
6027 /**
6028  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6029  * @hw: pointer to the hardware structure
6030  * @vsi_handle: VSI handle to configure
6031  * @promisc_mask: mask of promiscuous config bits
6032  * @vid: VLAN ID to set VLAN promiscuous
6033  */
6034 enum ice_status
6035 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6036                     u16 vid)
6037 {
6038         return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6039                                     hw->port_info->lport,
6040                                     hw->switch_info);
6041 }
6042
6043 /**
6044  * _ice_set_vlan_vsi_promisc
6045  * @hw: pointer to the hardware structure
6046  * @vsi_handle: VSI handle to configure
6047  * @promisc_mask: mask of promiscuous config bits
6048  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6049  * @lport: logical port number to configure promisc mode
6050  * @sw: pointer to switch info struct for which function add rule
6051  *
6052  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6053  */
6054 static enum ice_status
6055 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6056                           bool rm_vlan_promisc, u8 lport,
6057                           struct ice_switch_info *sw)
6058 {
6059         struct ice_fltr_list_entry *list_itr, *tmp;
6060         struct LIST_HEAD_TYPE vsi_list_head;
6061         struct LIST_HEAD_TYPE *vlan_head;
6062         struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6063         enum ice_status status;
6064         u16 vlan_id;
6065
6066         INIT_LIST_HEAD(&vsi_list_head);
6067         vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6068         vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6069         ice_acquire_lock(vlan_lock);
6070         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6071                                           &vsi_list_head);
6072         ice_release_lock(vlan_lock);
6073         if (status)
6074                 goto free_fltr_list;
6075
6076         LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6077                             list_entry) {
6078                 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6079                 if (rm_vlan_promisc)
6080                         status =  _ice_clear_vsi_promisc(hw, vsi_handle,
6081                                                          promisc_mask,
6082                                                          vlan_id, sw);
6083                 else
6084                         status =  _ice_set_vsi_promisc(hw, vsi_handle,
6085                                                        promisc_mask, vlan_id,
6086                                                        lport, sw);
6087                 if (status)
6088                         break;
6089         }
6090
6091 free_fltr_list:
6092         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6093                                  ice_fltr_list_entry, list_entry) {
6094                 LIST_DEL(&list_itr->list_entry);
6095                 ice_free(hw, list_itr);
6096         }
6097         return status;
6098 }
6099
6100 /**
6101  * ice_set_vlan_vsi_promisc
6102  * @hw: pointer to the hardware structure
6103  * @vsi_handle: VSI handle to configure
6104  * @promisc_mask: mask of promiscuous config bits
6105  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6106  *
6107  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6108  */
6109 enum ice_status
6110 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6111                          bool rm_vlan_promisc)
6112 {
6113         return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6114                                          rm_vlan_promisc, hw->port_info->lport,
6115                                          hw->switch_info);
6116 }
6117
6118 /**
6119  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6120  * @hw: pointer to the hardware structure
6121  * @vsi_handle: VSI handle to remove filters from
6122  * @recp_list: recipe list from which function remove fltr
6123  * @lkup: switch rule filter lookup type
6124  */
6125 static void
6126 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6127                          struct ice_sw_recipe *recp_list,
6128                          enum ice_sw_lkup_type lkup)
6129 {
6130         struct ice_fltr_list_entry *fm_entry;
6131         struct LIST_HEAD_TYPE remove_list_head;
6132         struct LIST_HEAD_TYPE *rule_head;
6133         struct ice_fltr_list_entry *tmp;
6134         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
6135         enum ice_status status;
6136
6137         INIT_LIST_HEAD(&remove_list_head);
6138         rule_lock = &recp_list[lkup].filt_rule_lock;
6139         rule_head = &recp_list[lkup].filt_rules;
6140         ice_acquire_lock(rule_lock);
6141         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6142                                           &remove_list_head);
6143         ice_release_lock(rule_lock);
6144         if (status)
6145                 goto free_fltr_list;
6146
6147         switch (lkup) {
6148         case ICE_SW_LKUP_MAC:
6149                 ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6150                 break;
6151         case ICE_SW_LKUP_VLAN:
6152                 ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6153                 break;
6154         case ICE_SW_LKUP_PROMISC:
6155         case ICE_SW_LKUP_PROMISC_VLAN:
6156                 ice_remove_promisc(hw, lkup, &remove_list_head);
6157                 break;
6158         case ICE_SW_LKUP_MAC_VLAN:
6159                 ice_remove_mac_vlan(hw, &remove_list_head);
6160                 break;
6161         case ICE_SW_LKUP_ETHERTYPE:
6162         case ICE_SW_LKUP_ETHERTYPE_MAC:
6163                 ice_remove_eth_mac(hw, &remove_list_head);
6164                 break;
6165         case ICE_SW_LKUP_DFLT:
6166                 ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6167                 break;
6168         case ICE_SW_LKUP_LAST:
6169                 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6170                 break;
6171         }
6172
6173 free_fltr_list:
6174         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6175                                  ice_fltr_list_entry, list_entry) {
6176                 LIST_DEL(&fm_entry->list_entry);
6177                 ice_free(hw, fm_entry);
6178         }
6179 }
6180
6181 /**
6182  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6183  * @hw: pointer to the hardware structure
6184  * @vsi_handle: VSI handle to remove filters from
6185  * @sw: pointer to switch info struct
6186  */
6187 static void
6188 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6189                          struct ice_switch_info *sw)
6190 {
6191         ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6192
6193         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6194                                  sw->recp_list, ICE_SW_LKUP_MAC);
6195         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6196                                  sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6197         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6198                                  sw->recp_list, ICE_SW_LKUP_PROMISC);
6199         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6200                                  sw->recp_list, ICE_SW_LKUP_VLAN);
6201         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6202                                  sw->recp_list, ICE_SW_LKUP_DFLT);
6203         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6204                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6205         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6206                                  sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6207         ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6208                                  sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6209 }
6210
6211 /**
6212  * ice_remove_vsi_fltr - Remove all filters for a VSI
6213  * @hw: pointer to the hardware structure
6214  * @vsi_handle: VSI handle to remove filters from
6215  */
6216 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6217 {
6218         ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6219 }
6220
6221 /**
6222  * ice_alloc_res_cntr - allocating resource counter
6223  * @hw: pointer to the hardware structure
6224  * @type: type of resource
6225  * @alloc_shared: if set it is shared else dedicated
6226  * @num_items: number of entries requested for FD resource type
6227  * @counter_id: counter index returned by AQ call
6228  */
6229 enum ice_status
6230 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6231                    u16 *counter_id)
6232 {
6233         struct ice_aqc_alloc_free_res_elem *buf;
6234         enum ice_status status;
6235         u16 buf_len;
6236
6237         /* Allocate resource */
6238         buf_len = ice_struct_size(buf, elem, 1);
6239         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6240         if (!buf)
6241                 return ICE_ERR_NO_MEMORY;
6242
6243         buf->num_elems = CPU_TO_LE16(num_items);
6244         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6245                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6246
6247         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6248                                        ice_aqc_opc_alloc_res, NULL);
6249         if (status)
6250                 goto exit;
6251
6252         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6253
6254 exit:
6255         ice_free(hw, buf);
6256         return status;
6257 }
6258
6259 /**
6260  * ice_free_res_cntr - free resource counter
6261  * @hw: pointer to the hardware structure
6262  * @type: type of resource
6263  * @alloc_shared: if set it is shared else dedicated
6264  * @num_items: number of entries to be freed for FD resource type
6265  * @counter_id: counter ID resource which needs to be freed
6266  */
6267 enum ice_status
6268 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6269                   u16 counter_id)
6270 {
6271         struct ice_aqc_alloc_free_res_elem *buf;
6272         enum ice_status status;
6273         u16 buf_len;
6274
6275         /* Free resource */
6276         buf_len = ice_struct_size(buf, elem, 1);
6277         buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6278         if (!buf)
6279                 return ICE_ERR_NO_MEMORY;
6280
6281         buf->num_elems = CPU_TO_LE16(num_items);
6282         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6283                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
6284         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6285
6286         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6287                                        ice_aqc_opc_free_res, NULL);
6288         if (status)
6289                 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6290
6291         ice_free(hw, buf);
6292         return status;
6293 }
6294
6295 /**
6296  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6297  * @hw: pointer to the hardware structure
6298  * @counter_id: returns counter index
6299  */
6300 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6301 {
6302         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6303                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6304                                   counter_id);
6305 }
6306
6307 /**
6308  * ice_free_vlan_res_counter - Free counter resource for VLAN type
6309  * @hw: pointer to the hardware structure
6310  * @counter_id: counter index to be freed
6311  */
6312 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6313 {
6314         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6315                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6316                                  counter_id);
6317 }
6318
6319 /**
6320  * ice_alloc_res_lg_act - add large action resource
6321  * @hw: pointer to the hardware structure
6322  * @l_id: large action ID to fill it in
6323  * @num_acts: number of actions to hold with a large action entry
6324  */
6325 static enum ice_status
6326 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6327 {
6328         struct ice_aqc_alloc_free_res_elem *sw_buf;
6329         enum ice_status status;
6330         u16 buf_len;
6331
6332         if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6333                 return ICE_ERR_PARAM;
6334
6335         /* Allocate resource for large action */
6336         buf_len = ice_struct_size(sw_buf, elem, 1);
6337         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6338         if (!sw_buf)
6339                 return ICE_ERR_NO_MEMORY;
6340
6341         sw_buf->num_elems = CPU_TO_LE16(1);
6342
6343         /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6344          * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6345          * If num_acts is greater than 2, then use
6346          * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6347          * The num_acts cannot exceed 4. This was ensured at the
6348          * beginning of the function.
6349          */
6350         if (num_acts == 1)
6351                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6352         else if (num_acts == 2)
6353                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6354         else
6355                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6356
6357         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6358                                        ice_aqc_opc_alloc_res, NULL);
6359         if (!status)
6360                 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6361
6362         ice_free(hw, sw_buf);
6363         return status;
6364 }
6365
6366 /**
6367  * ice_add_mac_with_sw_marker - add filter with sw marker
6368  * @hw: pointer to the hardware structure
6369  * @f_info: filter info structure containing the MAC filter information
6370  * @sw_marker: sw marker to tag the Rx descriptor with
6371  */
6372 enum ice_status
6373 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6374                            u16 sw_marker)
6375 {
6376         struct ice_fltr_mgmt_list_entry *m_entry;
6377         struct ice_fltr_list_entry fl_info;
6378         struct ice_sw_recipe *recp_list;
6379         struct LIST_HEAD_TYPE l_head;
6380         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
6381         enum ice_status ret;
6382         bool entry_exists;
6383         u16 lg_act_id;
6384
6385         if (f_info->fltr_act != ICE_FWD_TO_VSI)
6386                 return ICE_ERR_PARAM;
6387
6388         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6389                 return ICE_ERR_PARAM;
6390
6391         if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6392                 return ICE_ERR_PARAM;
6393
6394         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6395                 return ICE_ERR_PARAM;
6396         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6397
6398         /* Add filter if it doesn't exist so then the adding of large
6399          * action always results in update
6400          */
6401
6402         INIT_LIST_HEAD(&l_head);
6403         fl_info.fltr_info = *f_info;
6404         LIST_ADD(&fl_info.list_entry, &l_head);
6405
6406         entry_exists = false;
6407         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6408                                hw->port_info->lport);
6409         if (ret == ICE_ERR_ALREADY_EXISTS)
6410                 entry_exists = true;
6411         else if (ret)
6412                 return ret;
6413
6414         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6415         rule_lock = &recp_list->filt_rule_lock;
6416         ice_acquire_lock(rule_lock);
6417         /* Get the book keeping entry for the filter */
6418         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6419         if (!m_entry)
6420                 goto exit_error;
6421
6422         /* If counter action was enabled for this rule then don't enable
6423          * sw marker large action
6424          */
6425         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6426                 ret = ICE_ERR_PARAM;
6427                 goto exit_error;
6428         }
6429
6430         /* if same marker was added before */
6431         if (m_entry->sw_marker_id == sw_marker) {
6432                 ret = ICE_ERR_ALREADY_EXISTS;
6433                 goto exit_error;
6434         }
6435
6436         /* Allocate a hardware table entry to hold large act. Three actions
6437          * for marker based large action
6438          */
6439         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6440         if (ret)
6441                 goto exit_error;
6442
6443         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6444                 goto exit_error;
6445
6446         /* Update the switch rule to add the marker action */
6447         ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6448         if (!ret) {
6449                 ice_release_lock(rule_lock);
6450                 return ret;
6451         }
6452
6453 exit_error:
6454         ice_release_lock(rule_lock);
6455         /* only remove entry if it did not exist previously */
6456         if (!entry_exists)
6457                 ret = ice_remove_mac(hw, &l_head);
6458
6459         return ret;
6460 }
6461
6462 /**
6463  * ice_add_mac_with_counter - add filter with counter enabled
6464  * @hw: pointer to the hardware structure
6465  * @f_info: pointer to filter info structure containing the MAC filter
6466  *          information
6467  */
6468 enum ice_status
6469 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6470 {
6471         struct ice_fltr_mgmt_list_entry *m_entry;
6472         struct ice_fltr_list_entry fl_info;
6473         struct ice_sw_recipe *recp_list;
6474         struct LIST_HEAD_TYPE l_head;
6475         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
6476         enum ice_status ret;
6477         bool entry_exist;
6478         u16 counter_id;
6479         u16 lg_act_id;
6480
6481         if (f_info->fltr_act != ICE_FWD_TO_VSI)
6482                 return ICE_ERR_PARAM;
6483
6484         if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6485                 return ICE_ERR_PARAM;
6486
6487         if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6488                 return ICE_ERR_PARAM;
6489         f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6490         recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6491
6492         entry_exist = false;
6493
6494         rule_lock = &recp_list->filt_rule_lock;
6495
6496         /* Add filter if it doesn't exist so then the adding of large
6497          * action always results in update
6498          */
6499         INIT_LIST_HEAD(&l_head);
6500
6501         fl_info.fltr_info = *f_info;
6502         LIST_ADD(&fl_info.list_entry, &l_head);
6503
6504         ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6505                                hw->port_info->lport);
6506         if (ret == ICE_ERR_ALREADY_EXISTS)
6507                 entry_exist = true;
6508         else if (ret)
6509                 return ret;
6510
6511         ice_acquire_lock(rule_lock);
6512         m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6513         if (!m_entry) {
6514                 ret = ICE_ERR_BAD_PTR;
6515                 goto exit_error;
6516         }
6517
6518         /* Don't enable counter for a filter for which sw marker was enabled */
6519         if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6520                 ret = ICE_ERR_PARAM;
6521                 goto exit_error;
6522         }
6523
6524         /* If a counter was already enabled then don't need to add again */
6525         if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6526                 ret = ICE_ERR_ALREADY_EXISTS;
6527                 goto exit_error;
6528         }
6529
6530         /* Allocate a hardware table entry to VLAN counter */
6531         ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6532         if (ret)
6533                 goto exit_error;
6534
6535         /* Allocate a hardware table entry to hold large act. Two actions for
6536          * counter based large action
6537          */
6538         ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6539         if (ret)
6540                 goto exit_error;
6541
6542         if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6543                 goto exit_error;
6544
6545         /* Update the switch rule to add the counter action */
6546         ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6547         if (!ret) {
6548                 ice_release_lock(rule_lock);
6549                 return ret;
6550         }
6551
6552 exit_error:
6553         ice_release_lock(rule_lock);
6554         /* only remove entry if it did not exist previously */
6555         if (!entry_exist)
6556                 ret = ice_remove_mac(hw, &l_head);
6557
6558         return ret;
6559 }
6560
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 *
 * NOTE(review): the VLAN entries list offset 2 before offset 0, i.e. the
 * first 16-bit word of the matching union member maps to header byte 2 —
 * confirm against the ice_vlan_hdr field order before editing.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_ESP,		{ 0, 2, 4, 6 } },
	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
	{ ICE_VLAN_EX,		{ 2, 0 } },
	{ ICE_VLAN_IN,		{ 2, 0 } },
};
6600
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 */

/* Map of software protocol types to hardware protocol IDs.
 * Deliberately not const: ice_change_proto_id_to_dvm() rewrites the
 * ICE_VLAN_OFOS entry when double VLAN mode (DVM) is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_ESP,		ICE_ESP_HW },
	{ ICE_AH,		ICE_AH_HW },
	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
};
6635
6636 /**
6637  * ice_find_recp - find a recipe
6638  * @hw: pointer to the hardware structure
6639  * @lkup_exts: extension sequence to match
6640  *
6641  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6642  */
6643 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6644                          enum ice_sw_tunnel_type tun_type, u32 priority)
6645 {
6646         bool refresh_required = true;
6647         struct ice_sw_recipe *recp;
6648         u8 i;
6649
6650         /* Walk through existing recipes to find a match */
6651         recp = hw->switch_info->recp_list;
6652         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6653                 /* If recipe was not created for this ID, in SW bookkeeping,
6654                  * check if FW has an entry for this recipe. If the FW has an
6655                  * entry update it in our SW bookkeeping and continue with the
6656                  * matching.
6657                  */
6658                 if (!recp[i].recp_created)
6659                         if (ice_get_recp_frm_fw(hw,
6660                                                 hw->switch_info->recp_list, i,
6661                                                 &refresh_required))
6662                                 continue;
6663
6664                 /* Skip inverse action recipes */
6665                 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6666                     ICE_AQ_RECIPE_ACT_INV_ACT)
6667                         continue;
6668
6669                 /* if number of words we are looking for match */
6670                 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6671                         struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6672                         struct ice_fv_word *be = lkup_exts->fv_words;
6673                         u16 *cr = recp[i].lkup_exts.field_mask;
6674                         u16 *de = lkup_exts->field_mask;
6675                         bool found = true;
6676                         u8 pe, qr;
6677
6678                         /* ar, cr, and qr are related to the recipe words, while
6679                          * be, de, and pe are related to the lookup words
6680                          */
6681                         for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6682                                 for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6683                                      qr++) {
6684                                         if (ar[qr].off == be[pe].off &&
6685                                             ar[qr].prot_id == be[pe].prot_id &&
6686                                             cr[qr] == de[pe])
6687                                                 /* Found the "pe"th word in the
6688                                                  * given recipe
6689                                                  */
6690                                                 break;
6691                                 }
6692                                 /* After walking through all the words in the
6693                                  * "i"th recipe if "p"th word was not found then
6694                                  * this recipe is not what we are looking for.
6695                                  * So break out from this loop and try the next
6696                                  * recipe
6697                                  */
6698                                 if (qr >= recp[i].lkup_exts.n_val_words) {
6699                                         found = false;
6700                                         break;
6701                                 }
6702                         }
6703                         /* If for "i"th recipe the found was never set to false
6704                          * then it means we found our match
6705                          */
6706                         if (tun_type == recp[i].tun_type && found &&
6707                             priority == recp[i].priority)
6708                                 return i; /* Return the recipe ID */
6709                 }
6710         }
6711         return ICE_MAX_NUM_RECIPES;
6712 }
6713
6714 /**
6715  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6716  *
6717  * As protocol id for outer vlan is different in dvm and svm, if dvm is
6718  * supported protocol array record for outer vlan has to be modified to
6719  * reflect the value proper for DVM.
6720  */
6721 void ice_change_proto_id_to_dvm(void)
6722 {
6723         u8 i;
6724
6725         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6726                 if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6727                     ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6728                         ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6729 }
6730
6731 /**
6732  * ice_prot_type_to_id - get protocol ID from protocol type
6733  * @type: protocol type
6734  * @id: pointer to variable that will receive the ID
6735  *
6736  * Returns true if found, false otherwise
6737  */
6738 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6739 {
6740         u8 i;
6741
6742         for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6743                 if (ice_prot_id_tbl[i].type == type) {
6744                         *id = ice_prot_id_tbl[i].protocol_id;
6745                         return true;
6746                 }
6747         return false;
6748 }
6749
6750 /**
6751  * ice_fill_valid_words - count valid words
6752  * @rule: advanced rule with lookup information
6753  * @lkup_exts: byte offset extractions of the words that are valid
6754  *
6755  * calculate valid words in a lookup rule using mask value
6756  */
6757 static u8
6758 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6759                      struct ice_prot_lkup_ext *lkup_exts)
6760 {
6761         u8 j, word, prot_id, ret_val;
6762
6763         if (!ice_prot_type_to_id(rule->type, &prot_id))
6764                 return 0;
6765
6766         word = lkup_exts->n_val_words;
6767
6768         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6769                 if (((u16 *)&rule->m_u)[j] &&
6770                     (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6771                         /* No more space to accommodate */
6772                         if (word >= ICE_MAX_CHAIN_WORDS)
6773                                 return 0;
6774                         lkup_exts->fv_words[word].off =
6775                                 ice_prot_ext[rule->type].offs[j];
6776                         lkup_exts->fv_words[word].prot_id =
6777                                 ice_prot_id_tbl[rule->type].protocol_id;
6778                         lkup_exts->field_mask[word] =
6779                                 BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6780                         word++;
6781                 }
6782
6783         ret_val = word - lkup_exts->n_val_words;
6784         lkup_exts->n_val_words = word;
6785
6786         return ret_val;
6787 }
6788
6789 /**
6790  * ice_create_first_fit_recp_def - Create a recipe grouping
6791  * @hw: pointer to the hardware structure
6792  * @lkup_exts: an array of protocol header extractions
6793  * @rg_list: pointer to a list that stores new recipe groups
6794  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6795  *
6796  * Using first fit algorithm, take all the words that are still not done
6797  * and start grouping them in 4-word groups. Each group makes up one
6798  * recipe.
6799  */
6800 static enum ice_status
6801 ice_create_first_fit_recp_def(struct ice_hw *hw,
6802                               struct ice_prot_lkup_ext *lkup_exts,
6803                               struct LIST_HEAD_TYPE *rg_list,
6804                               u8 *recp_cnt)
6805 {
6806         struct ice_pref_recipe_group *grp = NULL;
6807         u8 j;
6808
6809         *recp_cnt = 0;
6810
6811         if (!lkup_exts->n_val_words) {
6812                 struct ice_recp_grp_entry *entry;
6813
6814                 entry = (struct ice_recp_grp_entry *)
6815                         ice_malloc(hw, sizeof(*entry));
6816                 if (!entry)
6817                         return ICE_ERR_NO_MEMORY;
6818                 LIST_ADD(&entry->l_entry, rg_list);
6819                 grp = &entry->r_group;
6820                 (*recp_cnt)++;
6821                 grp->n_val_pairs = 0;
6822         }
6823
6824         /* Walk through every word in the rule to check if it is not done. If so
6825          * then this word needs to be part of a new recipe.
6826          */
6827         for (j = 0; j < lkup_exts->n_val_words; j++)
6828                 if (!ice_is_bit_set(lkup_exts->done, j)) {
6829                         if (!grp ||
6830                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6831                                 struct ice_recp_grp_entry *entry;
6832
6833                                 entry = (struct ice_recp_grp_entry *)
6834                                         ice_malloc(hw, sizeof(*entry));
6835                                 if (!entry)
6836                                         return ICE_ERR_NO_MEMORY;
6837                                 LIST_ADD(&entry->l_entry, rg_list);
6838                                 grp = &entry->r_group;
6839                                 (*recp_cnt)++;
6840                         }
6841
6842                         grp->pairs[grp->n_val_pairs].prot_id =
6843                                 lkup_exts->fv_words[j].prot_id;
6844                         grp->pairs[grp->n_val_pairs].off =
6845                                 lkup_exts->fv_words[j].off;
6846                         grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6847                         grp->n_val_pairs++;
6848                 }
6849
6850         return ICE_SUCCESS;
6851 }
6852
6853 /**
6854  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6855  * @hw: pointer to the hardware structure
6856  * @fv_list: field vector with the extraction sequence information
6857  * @rg_list: recipe groupings with protocol-offset pairs
6858  *
6859  * Helper function to fill in the field vector indices for protocol-offset
6860  * pairs. These indexes are then ultimately programmed into a recipe.
6861  */
6862 static enum ice_status
6863 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6864                        struct LIST_HEAD_TYPE *rg_list)
6865 {
6866         struct ice_sw_fv_list_entry *fv;
6867         struct ice_recp_grp_entry *rg;
6868         struct ice_fv_word *fv_ext;
6869
6870         if (LIST_EMPTY(fv_list))
6871                 return ICE_SUCCESS;
6872
6873         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6874         fv_ext = fv->fv_ptr->ew;
6875
6876         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6877                 u8 i;
6878
6879                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6880                         struct ice_fv_word *pr;
6881                         bool found = false;
6882                         u16 mask;
6883                         u8 j;
6884
6885                         pr = &rg->r_group.pairs[i];
6886                         mask = rg->r_group.mask[i];
6887
6888                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6889                                 if (fv_ext[j].prot_id == pr->prot_id &&
6890                                     fv_ext[j].off == pr->off) {
6891                                         found = true;
6892
6893                                         /* Store index of field vector */
6894                                         rg->fv_idx[i] = j;
6895                                         rg->fv_mask[i] = mask;
6896                                         break;
6897                                 }
6898
6899                         /* Protocol/offset could not be found, caller gave an
6900                          * invalid pair
6901                          */
6902                         if (!found)
6903                                 return ICE_ERR_PARAM;
6904                 }
6905         }
6906
6907         return ICE_SUCCESS;
6908 }
6909
6910 /**
6911  * ice_find_free_recp_res_idx - find free result indexes for recipe
6912  * @hw: pointer to hardware structure
6913  * @profiles: bitmap of profiles that will be associated with the new recipe
6914  * @free_idx: pointer to variable to receive the free index bitmap
6915  *
6916  * The algorithm used here is:
6917  *      1. When creating a new recipe, create a set P which contains all
6918  *         Profiles that will be associated with our new recipe
6919  *
6920  *      2. For each Profile p in set P:
6921  *          a. Add all recipes associated with Profile p into set R
6922  *          b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6923  *              [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6924  *              i. Or just assume they all have the same possible indexes:
6925  *                      44, 45, 46, 47
6926  *                      i.e., PossibleIndexes = 0x0000F00000000000
6927  *
6928  *      3. For each Recipe r in set R:
6929  *          a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6930  *          b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6931  *
6932  *      FreeIndexes will contain the bits indicating the indexes free for use,
6933  *      then the code needs to update the recipe[r].used_result_idx_bits to
6934  *      indicate which indexes were selected for use by this recipe.
6935  */
6936 static u16
6937 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6938                            ice_bitmap_t *free_idx)
6939 {
6940         ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6941         ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6942         ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6943         u16 bit;
6944
6945         ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6946         ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6947         ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6948         ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6949
6950         ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6951
6952         /* For each profile we are going to associate the recipe with, add the
6953          * recipes that are associated with that profile. This will give us
6954          * the set of recipes that our recipe may collide with. Also, determine
6955          * what possible result indexes are usable given this set of profiles.
6956          */
6957         ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6958                 ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6959                               ICE_MAX_NUM_RECIPES);
6960                 ice_and_bitmap(possible_idx, possible_idx,
6961                                hw->switch_info->prof_res_bm[bit],
6962                                ICE_MAX_FV_WORDS);
6963         }
6964
6965         /* For each recipe that our new recipe may collide with, determine
6966          * which indexes have been used.
6967          */
6968         ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6969                 ice_or_bitmap(used_idx, used_idx,
6970                               hw->switch_info->recp_list[bit].res_idxs,
6971                               ICE_MAX_FV_WORDS);
6972
6973         ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6974
6975         /* return number of free indexes */
6976         return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6977 }
6978
6979 /**
6980  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6981  * @hw: pointer to hardware structure
6982  * @rm: recipe management list entry
6983  * @profiles: bitmap of profiles that will be associated.
6984  */
6985 static enum ice_status
6986 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6987                   ice_bitmap_t *profiles)
6988 {
6989         ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6990         struct ice_aqc_recipe_data_elem *tmp;
6991         struct ice_aqc_recipe_data_elem *buf;
6992         struct ice_recp_grp_entry *entry;
6993         enum ice_status status;
6994         u16 free_res_idx;
6995         u16 recipe_count;
6996         u8 chain_idx;
6997         u8 recps = 0;
6998
6999         /* When more than one recipe are required, another recipe is needed to
7000          * chain them together. Matching a tunnel metadata ID takes up one of
7001          * the match fields in the chaining recipe reducing the number of
7002          * chained recipes by one.
7003          */
7004          /* check number of free result indices */
7005         ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7006         free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
7007
7008         ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
7009                   free_res_idx, rm->n_grp_count);
7010
7011         if (rm->n_grp_count > 1) {
7012                 if (rm->n_grp_count > free_res_idx)
7013                         return ICE_ERR_MAX_LIMIT;
7014
7015                 rm->n_grp_count++;
7016         }
7017
7018         if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7019                 return ICE_ERR_MAX_LIMIT;
7020
7021         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7022                                                             ICE_MAX_NUM_RECIPES,
7023                                                             sizeof(*tmp));
7024         if (!tmp)
7025                 return ICE_ERR_NO_MEMORY;
7026
7027         buf = (struct ice_aqc_recipe_data_elem *)
7028                 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7029         if (!buf) {
7030                 status = ICE_ERR_NO_MEMORY;
7031                 goto err_mem;
7032         }
7033
7034         ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7035         recipe_count = ICE_MAX_NUM_RECIPES;
7036         status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7037                                    NULL);
7038         if (status || recipe_count == 0)
7039                 goto err_unroll;
7040
7041         /* Allocate the recipe resources, and configure them according to the
7042          * match fields from protocol headers and extracted field vectors.
7043          */
7044         chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
7045         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7046                 u8 i;
7047
7048                 status = ice_alloc_recipe(hw, &entry->rid);
7049                 if (status)
7050                         goto err_unroll;
7051
7052                 /* Clear the result index of the located recipe, as this will be
7053                  * updated, if needed, later in the recipe creation process.
7054                  */
7055                 tmp[0].content.result_indx = 0;
7056
7057                 buf[recps] = tmp[0];
7058                 buf[recps].recipe_indx = (u8)entry->rid;
7059                 /* if the recipe is a non-root recipe RID should be programmed
7060                  * as 0 for the rules to be applied correctly.
7061                  */
7062                 buf[recps].content.rid = 0;
7063                 ice_memset(&buf[recps].content.lkup_indx, 0,
7064                            sizeof(buf[recps].content.lkup_indx),
7065                            ICE_NONDMA_MEM);
7066
7067                 /* All recipes use look-up index 0 to match switch ID. */
7068                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7069                 buf[recps].content.mask[0] =
7070                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7071                 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7072                  * to be 0
7073                  */
7074                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7075                         buf[recps].content.lkup_indx[i] = 0x80;
7076                         buf[recps].content.mask[i] = 0;
7077                 }
7078
7079                 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7080                         buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7081                         buf[recps].content.mask[i + 1] =
7082                                 CPU_TO_LE16(entry->fv_mask[i]);
7083                 }
7084
7085                 if (rm->n_grp_count > 1) {
7086                         /* Checks to see if there really is a valid result index
7087                          * that can be used.
7088                          */
7089                         if (chain_idx >= ICE_MAX_FV_WORDS) {
7090                                 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7091                                 status = ICE_ERR_MAX_LIMIT;
7092                                 goto err_unroll;
7093                         }
7094
7095                         entry->chain_idx = chain_idx;
7096                         buf[recps].content.result_indx =
7097                                 ICE_AQ_RECIPE_RESULT_EN |
7098                                 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7099                                  ICE_AQ_RECIPE_RESULT_DATA_M);
7100                         ice_clear_bit(chain_idx, result_idx_bm);
7101                         chain_idx = ice_find_first_bit(result_idx_bm,
7102                                                        ICE_MAX_FV_WORDS);
7103                 }
7104
7105                 /* fill recipe dependencies */
7106                 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7107                                 ICE_MAX_NUM_RECIPES);
7108                 ice_set_bit(buf[recps].recipe_indx,
7109                             (ice_bitmap_t *)buf[recps].recipe_bitmap);
7110                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7111                 recps++;
7112         }
7113
7114         if (rm->n_grp_count == 1) {
7115                 rm->root_rid = buf[0].recipe_indx;
7116                 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7117                 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7118                 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7119                         ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7120                                    sizeof(buf[0].recipe_bitmap),
7121                                    ICE_NONDMA_TO_NONDMA);
7122                 } else {
7123                         status = ICE_ERR_BAD_PTR;
7124                         goto err_unroll;
7125                 }
7126                 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
7127                  * the recipe which is getting created if specified
7128                  * by user. Usually any advanced switch filter, which results
7129                  * into new extraction sequence, ended up creating a new recipe
7130                  * of type ROOT and usually recipes are associated with profiles
7131                  * Switch rule referreing newly created recipe, needs to have
7132                  * either/or 'fwd' or 'join' priority, otherwise switch rule
7133                  * evaluation will not happen correctly. In other words, if
7134                  * switch rule to be evaluated on priority basis, then recipe
7135                  * needs to have priority, otherwise it will be evaluated last.
7136                  */
7137                 buf[0].content.act_ctrl_fwd_priority = rm->priority;
7138         } else {
7139                 struct ice_recp_grp_entry *last_chain_entry;
7140                 u16 rid, i;
7141
7142                 /* Allocate the last recipe that will chain the outcomes of the
7143                  * other recipes together
7144                  */
7145                 status = ice_alloc_recipe(hw, &rid);
7146                 if (status)
7147                         goto err_unroll;
7148
7149                 buf[recps].recipe_indx = (u8)rid;
7150                 buf[recps].content.rid = (u8)rid;
7151                 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7152                 /* the new entry created should also be part of rg_list to
7153                  * make sure we have complete recipe
7154                  */
7155                 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7156                         sizeof(*last_chain_entry));
7157                 if (!last_chain_entry) {
7158                         status = ICE_ERR_NO_MEMORY;
7159                         goto err_unroll;
7160                 }
7161                 last_chain_entry->rid = rid;
7162                 ice_memset(&buf[recps].content.lkup_indx, 0,
7163                            sizeof(buf[recps].content.lkup_indx),
7164                            ICE_NONDMA_MEM);
7165                 /* All recipes use look-up index 0 to match switch ID. */
7166                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7167                 buf[recps].content.mask[0] =
7168                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7169                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7170                         buf[recps].content.lkup_indx[i] =
7171                                 ICE_AQ_RECIPE_LKUP_IGNORE;
7172                         buf[recps].content.mask[i] = 0;
7173                 }
7174
7175                 i = 1;
7176                 /* update r_bitmap with the recp that is used for chaining */
7177                 ice_set_bit(rid, rm->r_bitmap);
7178                 /* this is the recipe that chains all the other recipes so it
7179                  * should not have a chaining ID to indicate the same
7180                  */
7181                 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7182                 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7183                                     l_entry) {
7184                         last_chain_entry->fv_idx[i] = entry->chain_idx;
7185                         buf[recps].content.lkup_indx[i] = entry->chain_idx;
7186                         buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7187                         ice_set_bit(entry->rid, rm->r_bitmap);
7188                 }
7189                 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7190                 if (sizeof(buf[recps].recipe_bitmap) >=
7191                     sizeof(rm->r_bitmap)) {
7192                         ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7193                                    sizeof(buf[recps].recipe_bitmap),
7194                                    ICE_NONDMA_TO_NONDMA);
7195                 } else {
7196                         status = ICE_ERR_BAD_PTR;
7197                         goto err_unroll;
7198                 }
7199                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7200
7201                 recps++;
7202                 rm->root_rid = (u8)rid;
7203         }
7204         status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7205         if (status)
7206                 goto err_unroll;
7207
7208         status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7209         ice_release_change_lock(hw);
7210         if (status)
7211                 goto err_unroll;
7212
7213         /* Every recipe that just got created add it to the recipe
7214          * book keeping list
7215          */
7216         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7217                 struct ice_switch_info *sw = hw->switch_info;
7218                 bool is_root, idx_found = false;
7219                 struct ice_sw_recipe *recp;
7220                 u16 idx, buf_idx = 0;
7221
7222                 /* find buffer index for copying some data */
7223                 for (idx = 0; idx < rm->n_grp_count; idx++)
7224                         if (buf[idx].recipe_indx == entry->rid) {
7225                                 buf_idx = idx;
7226                                 idx_found = true;
7227                         }
7228
7229                 if (!idx_found) {
7230                         status = ICE_ERR_OUT_OF_RANGE;
7231                         goto err_unroll;
7232                 }
7233
7234                 recp = &sw->recp_list[entry->rid];
7235                 is_root = (rm->root_rid == entry->rid);
7236                 recp->is_root = is_root;
7237
7238                 recp->root_rid = entry->rid;
7239                 recp->big_recp = (is_root && rm->n_grp_count > 1);
7240
7241                 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7242                            entry->r_group.n_val_pairs *
7243                            sizeof(struct ice_fv_word),
7244                            ICE_NONDMA_TO_NONDMA);
7245
7246                 ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7247                            sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7248
7249                 /* Copy non-result fv index values and masks to recipe. This
7250                  * call will also update the result recipe bitmask.
7251                  */
7252                 ice_collect_result_idx(&buf[buf_idx], recp);
7253
7254                 /* for non-root recipes, also copy to the root, this allows
7255                  * easier matching of a complete chained recipe
7256                  */
7257                 if (!is_root)
7258                         ice_collect_result_idx(&buf[buf_idx],
7259                                                &sw->recp_list[rm->root_rid]);
7260
7261                 recp->n_ext_words = entry->r_group.n_val_pairs;
7262                 recp->chain_idx = entry->chain_idx;
7263                 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7264                 recp->n_grp_count = rm->n_grp_count;
7265                 recp->tun_type = rm->tun_type;
7266                 recp->recp_created = true;
7267         }
7268         rm->root_buf = buf;
7269         ice_free(hw, tmp);
7270         return status;
7271
7272 err_unroll:
7273 err_mem:
7274         ice_free(hw, tmp);
7275         ice_free(hw, buf);
7276         return status;
7277 }
7278
7279 /**
7280  * ice_create_recipe_group - creates recipe group
7281  * @hw: pointer to hardware structure
7282  * @rm: recipe management list entry
7283  * @lkup_exts: lookup elements
7284  */
7285 static enum ice_status
7286 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7287                         struct ice_prot_lkup_ext *lkup_exts)
7288 {
7289         enum ice_status status;
7290         u8 recp_count = 0;
7291
7292         rm->n_grp_count = 0;
7293
7294         /* Create recipes for words that are marked not done by packing them
7295          * as best fit.
7296          */
7297         status = ice_create_first_fit_recp_def(hw, lkup_exts,
7298                                                &rm->rg_list, &recp_count);
7299         if (!status) {
7300                 rm->n_grp_count += recp_count;
7301                 rm->n_ext_words = lkup_exts->n_val_words;
7302                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7303                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7304                 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7305                            sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7306         }
7307
7308         return status;
7309 }
7310
7311 /**
7312  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7313  * @hw: pointer to hardware structure
7314  * @lkups: lookup elements or match criteria for the advanced recipe, one
7315  *         structure per protocol header
7316  * @lkups_cnt: number of protocols
7317  * @bm: bitmap of field vectors to consider
7318  * @fv_list: pointer to a list that holds the returned field vectors
7319  */
7320 static enum ice_status
7321 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7322            ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7323 {
7324         enum ice_status status;
7325         u8 *prot_ids;
7326         u16 i;
7327
7328         if (!lkups_cnt)
7329                 return ICE_SUCCESS;
7330
7331         prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7332         if (!prot_ids)
7333                 return ICE_ERR_NO_MEMORY;
7334
7335         for (i = 0; i < lkups_cnt; i++)
7336                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7337                         status = ICE_ERR_CFG;
7338                         goto free_mem;
7339                 }
7340
7341         /* Find field vectors that include all specified protocol types */
7342         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7343
7344 free_mem:
7345         ice_free(hw, prot_ids);
7346         return status;
7347 }
7348
7349 /**
7350  * ice_tun_type_match_word - determine if tun type needs a match mask
7351  * @tun_type: tunnel type
7352  * @mask: mask to be used for the tunnel
7353  */
7354 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7355 {
7356         switch (tun_type) {
7357         case ICE_SW_TUN_VXLAN_GPE:
7358         case ICE_SW_TUN_GENEVE:
7359         case ICE_SW_TUN_VXLAN:
7360         case ICE_SW_TUN_NVGRE:
7361         case ICE_SW_TUN_UDP:
7362         case ICE_ALL_TUNNELS:
7363         case ICE_SW_TUN_AND_NON_TUN_QINQ:
7364         case ICE_NON_TUN_QINQ:
7365         case ICE_SW_TUN_PPPOE_QINQ:
7366         case ICE_SW_TUN_PPPOE_PAY_QINQ:
7367         case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7368         case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7369                 *mask = ICE_TUN_FLAG_MASK;
7370                 return true;
7371
7372         case ICE_SW_TUN_GENEVE_VLAN:
7373         case ICE_SW_TUN_VXLAN_VLAN:
7374                 *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7375                 return true;
7376
7377         default:
7378                 *mask = 0;
7379                 return false;
7380         }
7381 }
7382
7383 /**
7384  * ice_add_special_words - Add words that are not protocols, such as metadata
7385  * @rinfo: other information regarding the rule e.g. priority and action info
7386  * @lkup_exts: lookup word structure
7387  */
7388 static enum ice_status
7389 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7390                       struct ice_prot_lkup_ext *lkup_exts)
7391 {
7392         u16 mask;
7393
7394         /* If this is a tunneled packet, then add recipe index to match the
7395          * tunnel bit in the packet metadata flags.
7396          */
7397         if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7398                 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7399                         u8 word = lkup_exts->n_val_words++;
7400
7401                         lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7402                         lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7403                         lkup_exts->field_mask[word] = mask;
7404                 } else {
7405                         return ICE_ERR_MAX_LIMIT;
7406                 }
7407         }
7408
7409         return ICE_SUCCESS;
7410 }
7411
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * Fills @bm with the profiles compatible with rinfo->tun_type. Broad tunnel
 * classes are resolved via ice_get_sw_fv_bitmap() from a profile type; the
 * cases that return early instead select one or more exact profile IDs.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 ice_bitmap_t *bm)
{
	enum ice_prof_type prof_type;

	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);

	switch (rinfo->tun_type) {
	case ICE_NON_TUN:
	case ICE_NON_TUN_QINQ:
		prof_type = ICE_PROF_NON_TUN;
		break;
	case ICE_ALL_TUNNELS:
		prof_type = ICE_PROF_TUN_ALL;
		break;
	case ICE_SW_TUN_VXLAN_GPE:
	case ICE_SW_TUN_GENEVE:
	case ICE_SW_TUN_GENEVE_VLAN:
	case ICE_SW_TUN_VXLAN:
	case ICE_SW_TUN_VXLAN_VLAN:
	case ICE_SW_TUN_UDP:
	case ICE_SW_TUN_GTP:
		prof_type = ICE_PROF_TUN_UDP;
		break;
	case ICE_SW_TUN_NVGRE:
		prof_type = ICE_PROF_TUN_GRE;
		break;
	case ICE_SW_TUN_PPPOE:
	case ICE_SW_TUN_PPPOE_QINQ:
		prof_type = ICE_PROF_TUN_PPPOE;
		break;
	/* From here on the tunnel type maps to exact profile ID(s); set the
	 * bit(s) and return without the profile-type lookup at the bottom.
	 */
	case ICE_SW_TUN_PPPOE_PAY:
	case ICE_SW_TUN_PPPOE_PAY_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4:
	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV4_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6:
	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_TCP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_PPPOE_IPV6_UDP:
		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_IPV6_ESP:
		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_IPV6_AH:
		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
		return;
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_IPV6_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_IPV6_NAT_T:
		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
		return;
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
		return;
	case ICE_SW_TUN_IPV4_NAT_T:
		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
		return;
	case ICE_SW_TUN_IPV4_L2TPV3:
		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
		return;
	case ICE_SW_TUN_IPV4_ESP:
		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
		return;
	case ICE_SW_TUN_IPV4_AH:
		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
		return;
	case ICE_SW_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
		return;
	case ICE_SW_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
		return;
	case ICE_SW_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
		return;
	case ICE_SW_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
		return;
	/* GTP-U "no payload" types match on the TEID profile only */
	case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
		return;
	/* GTP-U inner-packet types: "other" variants cover OTHER/UDP/TCP,
	 * protocol-specific variants set a single profile; EH variants use
	 * the extension-header profiles.
	 */
	case ICE_SW_TUN_IPV4_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
		return;
	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
		return;
	/* Unknown/combined types fall back to considering all profiles */
	case ICE_SW_TUN_AND_NON_TUN:
	case ICE_SW_TUN_AND_NON_TUN_QINQ:
	default:
		prof_type = ICE_PROF_ALL;
		break;
	}

	ice_get_sw_fv_bitmap(hw, prof_type, bm);
}
7632
7633 /**
7634  * ice_is_prof_rule - determine if rule type is a profile rule
7635  * @type: the rule type
7636  *
7637  * if the rule type is a profile rule, that means that there no field value
7638  * match required, in this case just a profile hit is required.
7639  */
7640 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7641 {
7642         switch (type) {
7643         case ICE_SW_TUN_PROFID_IPV6_ESP:
7644         case ICE_SW_TUN_PROFID_IPV6_AH:
7645         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7646         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7647         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7648         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7649         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7650         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7651                 return true;
7652         default:
7653                 break;
7654         }
7655
7656         return false;
7657 }
7658
7659 /**
7660  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7661  * @hw: pointer to hardware structure
7662  * @lkups: lookup elements or match criteria for the advanced recipe, one
7663  *  structure per protocol header
7664  * @lkups_cnt: number of protocols
7665  * @rinfo: other information regarding the rule e.g. priority and action info
7666  * @rid: return the recipe ID of the recipe created
7667  */
7668 static enum ice_status
7669 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7670                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7671 {
7672         ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7673         ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7674         struct ice_prot_lkup_ext *lkup_exts;
7675         struct ice_recp_grp_entry *r_entry;
7676         struct ice_sw_fv_list_entry *fvit;
7677         struct ice_recp_grp_entry *r_tmp;
7678         struct ice_sw_fv_list_entry *tmp;
7679         enum ice_status status = ICE_SUCCESS;
7680         struct ice_sw_recipe *rm;
7681         u8 i;
7682
7683         if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7684                 return ICE_ERR_PARAM;
7685
7686         lkup_exts = (struct ice_prot_lkup_ext *)
7687                 ice_malloc(hw, sizeof(*lkup_exts));
7688         if (!lkup_exts)
7689                 return ICE_ERR_NO_MEMORY;
7690
7691         /* Determine the number of words to be matched and if it exceeds a
7692          * recipe's restrictions
7693          */
7694         for (i = 0; i < lkups_cnt; i++) {
7695                 u16 count;
7696
7697                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7698                         status = ICE_ERR_CFG;
7699                         goto err_free_lkup_exts;
7700                 }
7701
7702                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
7703                 if (!count) {
7704                         status = ICE_ERR_CFG;
7705                         goto err_free_lkup_exts;
7706                 }
7707         }
7708
7709         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7710         if (!rm) {
7711                 status = ICE_ERR_NO_MEMORY;
7712                 goto err_free_lkup_exts;
7713         }
7714
7715         /* Get field vectors that contain fields extracted from all the protocol
7716          * headers being programmed.
7717          */
7718         INIT_LIST_HEAD(&rm->fv_list);
7719         INIT_LIST_HEAD(&rm->rg_list);
7720
7721         /* Get bitmap of field vectors (profiles) that are compatible with the
7722          * rule request; only these will be searched in the subsequent call to
7723          * ice_get_fv.
7724          */
7725         ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7726
7727         status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7728         if (status)
7729                 goto err_unroll;
7730
7731         /* Create any special protocol/offset pairs, such as looking at tunnel
7732          * bits by extracting metadata
7733          */
7734         status = ice_add_special_words(rinfo, lkup_exts);
7735         if (status)
7736                 goto err_free_lkup_exts;
7737
7738         /* Group match words into recipes using preferred recipe grouping
7739          * criteria.
7740          */
7741         status = ice_create_recipe_group(hw, rm, lkup_exts);
7742         if (status)
7743                 goto err_unroll;
7744
7745         /* set the recipe priority if specified */
7746         rm->priority = (u8)rinfo->priority;
7747
7748         /* Find offsets from the field vector. Pick the first one for all the
7749          * recipes.
7750          */
7751         status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7752         if (status)
7753                 goto err_unroll;
7754
7755         /* An empty FV list means to use all the profiles returned in the
7756          * profile bitmap
7757          */
7758         if (LIST_EMPTY(&rm->fv_list)) {
7759                 u16 j;
7760
7761                 ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7762                         struct ice_sw_fv_list_entry *fvl;
7763
7764                         fvl = (struct ice_sw_fv_list_entry *)
7765                                 ice_malloc(hw, sizeof(*fvl));
7766                         if (!fvl)
7767                                 goto err_unroll;
7768                         fvl->fv_ptr = NULL;
7769                         fvl->profile_id = j;
7770                         LIST_ADD(&fvl->list_entry, &rm->fv_list);
7771                 }
7772         }
7773
7774         /* get bitmap of all profiles the recipe will be associated with */
7775         ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7776         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7777                             list_entry) {
7778                 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7779                 ice_set_bit((u16)fvit->profile_id, profiles);
7780         }
7781
7782         /* Look for a recipe which matches our requested fv / mask list */
7783         *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7784         if (*rid < ICE_MAX_NUM_RECIPES)
7785                 /* Success if found a recipe that match the existing criteria */
7786                 goto err_unroll;
7787
7788         rm->tun_type = rinfo->tun_type;
7789         /* Recipe we need does not exist, add a recipe */
7790         status = ice_add_sw_recipe(hw, rm, profiles);
7791         if (status)
7792                 goto err_unroll;
7793
7794         /* Associate all the recipes created with all the profiles in the
7795          * common field vector.
7796          */
7797         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7798                             list_entry) {
7799                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7800                 u16 j;
7801
7802                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7803                                                       (u8 *)r_bitmap, NULL);
7804                 if (status)
7805                         goto err_unroll;
7806
7807                 ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7808                               ICE_MAX_NUM_RECIPES);
7809                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7810                 if (status)
7811                         goto err_unroll;
7812
7813                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7814                                                       (u8 *)r_bitmap,
7815                                                       NULL);
7816                 ice_release_change_lock(hw);
7817
7818                 if (status)
7819                         goto err_unroll;
7820
7821                 /* Update profile to recipe bitmap array */
7822                 ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7823                               ICE_MAX_NUM_RECIPES);
7824
7825                 /* Update recipe to profile bitmap array */
7826                 ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7827                         ice_set_bit((u16)fvit->profile_id,
7828                                     recipe_to_profile[j]);
7829         }
7830
7831         *rid = rm->root_rid;
7832         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7833                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7834 err_unroll:
7835         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7836                                  ice_recp_grp_entry, l_entry) {
7837                 LIST_DEL(&r_entry->l_entry);
7838                 ice_free(hw, r_entry);
7839         }
7840
7841         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7842                                  list_entry) {
7843                 LIST_DEL(&fvit->list_entry);
7844                 ice_free(hw, fvit);
7845         }
7846
7847         if (rm->root_buf)
7848                 ice_free(hw, rm->root_buf);
7849
7850         ice_free(hw, rm);
7851
7852 err_free_lkup_exts:
7853         ice_free(hw, lkup_exts);
7854
7855         return status;
7856 }
7857
7858 /**
7859  * ice_find_dummy_packet - find dummy packet by tunnel type
7860  *
7861  * @lkups: lookup elements or match criteria for the advanced recipe, one
7862  *         structure per protocol header
7863  * @lkups_cnt: number of protocols
7864  * @tun_type: tunnel type from the match criteria
7865  * @pkt: dummy packet to fill according to filter match criteria
7866  * @pkt_len: packet length of dummy packet
7867  * @offsets: pointer to receive the pointer to the offsets for the packet
7868  */
7869 static void
7870 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7871                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7872                       u16 *pkt_len,
7873                       const struct ice_dummy_pkt_offsets **offsets)
7874 {
7875         bool tcp = false, udp = false, ipv6 = false, vlan = false;
7876         bool gre = false, mpls = false;
7877         u16 i;
7878
7879         for (i = 0; i < lkups_cnt; i++) {
7880                 if (lkups[i].type == ICE_UDP_ILOS)
7881                         udp = true;
7882                 else if (lkups[i].type == ICE_TCP_IL)
7883                         tcp = true;
7884                 else if (lkups[i].type == ICE_IPV6_OFOS)
7885                         ipv6 = true;
7886                 else if (lkups[i].type == ICE_VLAN_OFOS)
7887                         vlan = true;
7888                 else if (lkups[i].type == ICE_ETYPE_OL &&
7889                          lkups[i].h_u.ethertype.ethtype_id ==
7890                                 CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7891                          lkups[i].m_u.ethertype.ethtype_id ==
7892                                 CPU_TO_BE16(0xFFFF))
7893                         ipv6 = true;
7894                 else if (lkups[i].type == ICE_IPV4_OFOS &&
7895                          lkups[i].h_u.ipv4_hdr.protocol ==
7896                                 ICE_IPV4_NVGRE_PROTO_ID &&
7897                          lkups[i].m_u.ipv4_hdr.protocol ==
7898                                 0xFF)
7899                         gre = true;
7900                 else if (lkups[i].type == ICE_PPPOE &&
7901                          lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7902                                 CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7903                          lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7904                                 0xFFFF)
7905                         ipv6 = true;
7906                 else if (lkups[i].type == ICE_IPV4_IL &&
7907                          lkups[i].h_u.ipv4_hdr.protocol ==
7908                                 ICE_TCP_PROTO_ID &&
7909                          lkups[i].m_u.ipv4_hdr.protocol ==
7910                                 0xFF)
7911                         tcp = true;
7912                 else if (lkups[i].type == ICE_ETYPE_OL &&
7913                          lkups[i].h_u.ethertype.ethtype_id ==
7914                                 CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
7915                          lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
7916                         mpls = true;
7917         }
7918
7919         if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7920              tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7921                 *pkt = dummy_qinq_ipv6_pkt;
7922                 *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7923                 *offsets = dummy_qinq_ipv6_packet_offsets;
7924                 return;
7925         } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7926                            tun_type == ICE_NON_TUN_QINQ) {
7927                 *pkt = dummy_qinq_ipv4_pkt;
7928                 *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7929                 *offsets = dummy_qinq_ipv4_packet_offsets;
7930                 return;
7931         }
7932
7933         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7934                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7935                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7936                 *offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7937                 return;
7938         } else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7939                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7940                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7941                 *offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7942                 return;
7943         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
7944                 *pkt = dummy_qinq_pppoe_ipv6_packet;
7945                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7946                 *offsets = dummy_qinq_pppoe_packet_offsets;
7947                 return;
7948         } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7949                         tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7950                 *pkt = dummy_qinq_pppoe_ipv4_pkt;
7951                 *pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7952                 *offsets = dummy_qinq_pppoe_packet_offsets;
7953                 return;
7954         }
7955
7956         if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7957                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
7958                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7959                 *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7960                 return;
7961         } else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7962                 *pkt = dummy_ipv6_gtp_packet;
7963                 *pkt_len = sizeof(dummy_ipv6_gtp_packet);
7964                 *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7965                 return;
7966         }
7967
7968         if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7969                 *pkt = dummy_ipv4_esp_pkt;
7970                 *pkt_len = sizeof(dummy_ipv4_esp_pkt);
7971                 *offsets = dummy_ipv4_esp_packet_offsets;
7972                 return;
7973         }
7974
7975         if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7976                 *pkt = dummy_ipv6_esp_pkt;
7977                 *pkt_len = sizeof(dummy_ipv6_esp_pkt);
7978                 *offsets = dummy_ipv6_esp_packet_offsets;
7979                 return;
7980         }
7981
7982         if (tun_type == ICE_SW_TUN_IPV4_AH) {
7983                 *pkt = dummy_ipv4_ah_pkt;
7984                 *pkt_len = sizeof(dummy_ipv4_ah_pkt);
7985                 *offsets = dummy_ipv4_ah_packet_offsets;
7986                 return;
7987         }
7988
7989         if (tun_type == ICE_SW_TUN_IPV6_AH) {
7990                 *pkt = dummy_ipv6_ah_pkt;
7991                 *pkt_len = sizeof(dummy_ipv6_ah_pkt);
7992                 *offsets = dummy_ipv6_ah_packet_offsets;
7993                 return;
7994         }
7995
7996         if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7997                 *pkt = dummy_ipv4_nat_pkt;
7998                 *pkt_len = sizeof(dummy_ipv4_nat_pkt);
7999                 *offsets = dummy_ipv4_nat_packet_offsets;
8000                 return;
8001         }
8002
8003         if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8004                 *pkt = dummy_ipv6_nat_pkt;
8005                 *pkt_len = sizeof(dummy_ipv6_nat_pkt);
8006                 *offsets = dummy_ipv6_nat_packet_offsets;
8007                 return;
8008         }
8009
8010         if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8011                 *pkt = dummy_ipv4_l2tpv3_pkt;
8012                 *pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8013                 *offsets = dummy_ipv4_l2tpv3_packet_offsets;
8014                 return;
8015         }
8016
8017         if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8018                 *pkt = dummy_ipv6_l2tpv3_pkt;
8019                 *pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8020                 *offsets = dummy_ipv6_l2tpv3_packet_offsets;
8021                 return;
8022         }
8023
8024         if (tun_type == ICE_SW_TUN_GTP) {
8025                 *pkt = dummy_udp_gtp_packet;
8026                 *pkt_len = sizeof(dummy_udp_gtp_packet);
8027                 *offsets = dummy_udp_gtp_packet_offsets;
8028                 return;
8029         }
8030
8031         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8032             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8033                 *pkt = dummy_ipv4_gtpu_ipv4_packet;
8034                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8035                 *offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8036                 return;
8037         }
8038
8039         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8040             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8041                 *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8042                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8043                 *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8044                 return;
8045         }
8046
8047         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8048             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8049                 *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8050                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8051                 *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8052                 return;
8053         }
8054
8055         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8056             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8057                 *pkt = dummy_ipv4_gtpu_ipv6_packet;
8058                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8059                 *offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8060                 return;
8061         }
8062
8063         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8064             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8065                 *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8066                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8067                 *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8068                 return;
8069         }
8070
8071         if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8072             tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8073                 *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8074                 *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8075                 *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8076                 return;
8077         }
8078
8079         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8080             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8081                 *pkt = dummy_ipv6_gtpu_ipv4_packet;
8082                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8083                 *offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8084                 return;
8085         }
8086
8087         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8088             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8089                 *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8090                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8091                 *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8092                 return;
8093         }
8094
8095         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8096             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8097                 *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8098                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8099                 *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8100                 return;
8101         }
8102
8103         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8104             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8105                 *pkt = dummy_ipv6_gtpu_ipv6_packet;
8106                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8107                 *offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8108                 return;
8109         }
8110
8111         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8112             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8113                 *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8114                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8115                 *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8116                 return;
8117         }
8118
8119         if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8120             tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8121                 *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8122                 *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8123                 *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8124                 return;
8125         }
8126
8127         if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8128                 *pkt = dummy_pppoe_ipv6_packet;
8129                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8130                 *offsets = dummy_pppoe_packet_offsets;
8131                 return;
8132         } else if (tun_type == ICE_SW_TUN_PPPOE ||
8133                 tun_type == ICE_SW_TUN_PPPOE_PAY) {
8134                 *pkt = dummy_pppoe_ipv4_packet;
8135                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8136                 *offsets = dummy_pppoe_packet_offsets;
8137                 return;
8138         }
8139
8140         if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8141                 *pkt = dummy_pppoe_ipv4_packet;
8142                 *pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8143                 *offsets = dummy_pppoe_packet_ipv4_offsets;
8144                 return;
8145         }
8146
8147         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8148                 *pkt = dummy_pppoe_ipv4_tcp_packet;
8149                 *pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8150                 *offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8151                 return;
8152         }
8153
8154         if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8155                 *pkt = dummy_pppoe_ipv4_udp_packet;
8156                 *pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8157                 *offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8158                 return;
8159         }
8160
8161         if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8162                 *pkt = dummy_pppoe_ipv6_packet;
8163                 *pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8164                 *offsets = dummy_pppoe_packet_ipv6_offsets;
8165                 return;
8166         }
8167
8168         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8169                 *pkt = dummy_pppoe_ipv6_tcp_packet;
8170                 *pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8171                 *offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8172                 return;
8173         }
8174
8175         if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8176                 *pkt = dummy_pppoe_ipv6_udp_packet;
8177                 *pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8178                 *offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8179                 return;
8180         }
8181
8182         if (tun_type == ICE_SW_IPV4_TCP) {
8183                 *pkt = dummy_tcp_packet;
8184                 *pkt_len = sizeof(dummy_tcp_packet);
8185                 *offsets = dummy_tcp_packet_offsets;
8186                 return;
8187         }
8188
8189         if (tun_type == ICE_SW_IPV4_UDP) {
8190                 *pkt = dummy_udp_packet;
8191                 *pkt_len = sizeof(dummy_udp_packet);
8192                 *offsets = dummy_udp_packet_offsets;
8193                 return;
8194         }
8195
8196         if (tun_type == ICE_SW_IPV6_TCP) {
8197                 *pkt = dummy_tcp_ipv6_packet;
8198                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8199                 *offsets = dummy_tcp_ipv6_packet_offsets;
8200                 return;
8201         }
8202
8203         if (tun_type == ICE_SW_IPV6_UDP) {
8204                 *pkt = dummy_udp_ipv6_packet;
8205                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8206                 *offsets = dummy_udp_ipv6_packet_offsets;
8207                 return;
8208         }
8209
8210         if (tun_type == ICE_ALL_TUNNELS) {
8211                 *pkt = dummy_gre_udp_packet;
8212                 *pkt_len = sizeof(dummy_gre_udp_packet);
8213                 *offsets = dummy_gre_udp_packet_offsets;
8214                 return;
8215         }
8216
8217         if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8218                 if (tcp) {
8219                         *pkt = dummy_gre_tcp_packet;
8220                         *pkt_len = sizeof(dummy_gre_tcp_packet);
8221                         *offsets = dummy_gre_tcp_packet_offsets;
8222                         return;
8223                 }
8224
8225                 *pkt = dummy_gre_udp_packet;
8226                 *pkt_len = sizeof(dummy_gre_udp_packet);
8227                 *offsets = dummy_gre_udp_packet_offsets;
8228                 return;
8229         }
8230
8231         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8232             tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8233             tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8234             tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8235                 if (tcp) {
8236                         *pkt = dummy_udp_tun_tcp_packet;
8237                         *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8238                         *offsets = dummy_udp_tun_tcp_packet_offsets;
8239                         return;
8240                 }
8241
8242                 *pkt = dummy_udp_tun_udp_packet;
8243                 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
8244                 *offsets = dummy_udp_tun_udp_packet_offsets;
8245                 return;
8246         }
8247
8248         if (udp && !ipv6) {
8249                 if (vlan) {
8250                         *pkt = dummy_vlan_udp_packet;
8251                         *pkt_len = sizeof(dummy_vlan_udp_packet);
8252                         *offsets = dummy_vlan_udp_packet_offsets;
8253                         return;
8254                 }
8255                 *pkt = dummy_udp_packet;
8256                 *pkt_len = sizeof(dummy_udp_packet);
8257                 *offsets = dummy_udp_packet_offsets;
8258                 return;
8259         } else if (udp && ipv6) {
8260                 if (vlan) {
8261                         *pkt = dummy_vlan_udp_ipv6_packet;
8262                         *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8263                         *offsets = dummy_vlan_udp_ipv6_packet_offsets;
8264                         return;
8265                 }
8266                 *pkt = dummy_udp_ipv6_packet;
8267                 *pkt_len = sizeof(dummy_udp_ipv6_packet);
8268                 *offsets = dummy_udp_ipv6_packet_offsets;
8269                 return;
8270         } else if ((tcp && ipv6) || ipv6) {
8271                 if (vlan) {
8272                         *pkt = dummy_vlan_tcp_ipv6_packet;
8273                         *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8274                         *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8275                         return;
8276                 }
8277                 *pkt = dummy_tcp_ipv6_packet;
8278                 *pkt_len = sizeof(dummy_tcp_ipv6_packet);
8279                 *offsets = dummy_tcp_ipv6_packet_offsets;
8280                 return;
8281         }
8282
8283         if (vlan) {
8284                 *pkt = dummy_vlan_tcp_packet;
8285                 *pkt_len = sizeof(dummy_vlan_tcp_packet);
8286                 *offsets = dummy_vlan_tcp_packet_offsets;
8287         }  else if (mpls) {
8288                 *pkt = dummy_mpls_packet;
8289                 *pkt_len = sizeof(dummy_mpls_packet);
8290                 *offsets = dummy_mpls_packet_offsets;
8291         } else {
8292                 *pkt = dummy_tcp_packet;
8293                 *pkt_len = sizeof(dummy_tcp_packet);
8294                 *offsets = dummy_tcp_packet_offsets;
8295         }
8296 }
8297
8298 /**
8299  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8300  *
8301  * @lkups: lookup elements or match criteria for the advanced recipe, one
8302  *         structure per protocol header
8303  * @lkups_cnt: number of protocols
8304  * @s_rule: stores rule information from the match criteria
8305  * @dummy_pkt: dummy packet to fill according to filter match criteria
8306  * @pkt_len: packet length of dummy packet
8307  * @offsets: offset info for the dummy packet
8308  */
8309 static enum ice_status
8310 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8311                           struct ice_aqc_sw_rules_elem *s_rule,
8312                           const u8 *dummy_pkt, u16 pkt_len,
8313                           const struct ice_dummy_pkt_offsets *offsets)
8314 {
8315         u8 *pkt;
8316         u16 i;
8317
8318         /* Start with a packet with a pre-defined/dummy content. Then, fill
8319          * in the header values to be looked up or matched.
8320          */
8321         pkt = s_rule->pdata.lkup_tx_rx.hdr;
8322
8323         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8324
8325         for (i = 0; i < lkups_cnt; i++) {
8326                 enum ice_protocol_type type;
8327                 u16 offset = 0, len = 0, j;
8328                 bool found = false;
8329
8330                 /* find the start of this layer; it should be found since this
8331                  * was already checked when search for the dummy packet
8332                  */
8333                 type = lkups[i].type;
8334                 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8335                         if (type == offsets[j].type) {
8336                                 offset = offsets[j].offset;
8337                                 found = true;
8338                                 break;
8339                         }
8340                 }
8341                 /* this should never happen in a correct calling sequence */
8342                 if (!found)
8343                         return ICE_ERR_PARAM;
8344
8345                 switch (lkups[i].type) {
8346                 case ICE_MAC_OFOS:
8347                 case ICE_MAC_IL:
8348                         len = sizeof(struct ice_ether_hdr);
8349                         break;
8350                 case ICE_ETYPE_OL:
8351                         len = sizeof(struct ice_ethtype_hdr);
8352                         break;
8353                 case ICE_VLAN_OFOS:
8354                 case ICE_VLAN_EX:
8355                 case ICE_VLAN_IN:
8356                         len = sizeof(struct ice_vlan_hdr);
8357                         break;
8358                 case ICE_IPV4_OFOS:
8359                 case ICE_IPV4_IL:
8360                         len = sizeof(struct ice_ipv4_hdr);
8361                         break;
8362                 case ICE_IPV6_OFOS:
8363                 case ICE_IPV6_IL:
8364                         len = sizeof(struct ice_ipv6_hdr);
8365                         break;
8366                 case ICE_TCP_IL:
8367                 case ICE_UDP_OF:
8368                 case ICE_UDP_ILOS:
8369                         len = sizeof(struct ice_l4_hdr);
8370                         break;
8371                 case ICE_SCTP_IL:
8372                         len = sizeof(struct ice_sctp_hdr);
8373                         break;
8374                 case ICE_NVGRE:
8375                         len = sizeof(struct ice_nvgre);
8376                         break;
8377                 case ICE_VXLAN:
8378                 case ICE_GENEVE:
8379                 case ICE_VXLAN_GPE:
8380                         len = sizeof(struct ice_udp_tnl_hdr);
8381                         break;
8382
8383                 case ICE_GTP:
8384                 case ICE_GTP_NO_PAY:
8385                         len = sizeof(struct ice_udp_gtp_hdr);
8386                         break;
8387                 case ICE_PPPOE:
8388                         len = sizeof(struct ice_pppoe_hdr);
8389                         break;
8390                 case ICE_ESP:
8391                         len = sizeof(struct ice_esp_hdr);
8392                         break;
8393                 case ICE_NAT_T:
8394                         len = sizeof(struct ice_nat_t_hdr);
8395                         break;
8396                 case ICE_AH:
8397                         len = sizeof(struct ice_ah_hdr);
8398                         break;
8399                 case ICE_L2TPV3:
8400                         len = sizeof(struct ice_l2tpv3_sess_hdr);
8401                         break;
8402                 default:
8403                         return ICE_ERR_PARAM;
8404                 }
8405
8406                 /* the length should be a word multiple */
8407                 if (len % ICE_BYTES_PER_WORD)
8408                         return ICE_ERR_CFG;
8409
8410                 /* We have the offset to the header start, the length, the
8411                  * caller's header values and mask. Use this information to
8412                  * copy the data into the dummy packet appropriately based on
8413                  * the mask. Note that we need to only write the bits as
8414                  * indicated by the mask to make sure we don't improperly write
8415                  * over any significant packet data.
8416                  */
8417                 for (j = 0; j < len / sizeof(u16); j++)
8418                         if (((u16 *)&lkups[i].m_u)[j])
8419                                 ((u16 *)(pkt + offset))[j] =
8420                                         (((u16 *)(pkt + offset))[j] &
8421                                          ~((u16 *)&lkups[i].m_u)[j]) |
8422                                         (((u16 *)&lkups[i].h_u)[j] &
8423                                          ((u16 *)&lkups[i].m_u)[j]);
8424         }
8425
8426         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8427
8428         return ICE_SUCCESS;
8429 }
8430
8431 /**
8432  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8433  * @hw: pointer to the hardware structure
8434  * @tun_type: tunnel type
8435  * @pkt: dummy packet to fill in
8436  * @offsets: offset info for the dummy packet
8437  */
8438 static enum ice_status
8439 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8440                         u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8441 {
8442         u16 open_port, i;
8443
8444         switch (tun_type) {
8445         case ICE_SW_TUN_AND_NON_TUN:
8446         case ICE_SW_TUN_VXLAN_GPE:
8447         case ICE_SW_TUN_VXLAN:
8448         case ICE_SW_TUN_VXLAN_VLAN:
8449         case ICE_SW_TUN_UDP:
8450                 if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8451                         return ICE_ERR_CFG;
8452                 break;
8453
8454         case ICE_SW_TUN_GENEVE:
8455         case ICE_SW_TUN_GENEVE_VLAN:
8456                 if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8457                         return ICE_ERR_CFG;
8458                 break;
8459
8460         default:
8461                 /* Nothing needs to be done for this tunnel type */
8462                 return ICE_SUCCESS;
8463         }
8464
8465         /* Find the outer UDP protocol header and insert the port number */
8466         for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8467                 if (offsets[i].type == ICE_UDP_OF) {
8468                         struct ice_l4_hdr *hdr;
8469                         u16 offset;
8470
8471                         offset = offsets[i].offset;
8472                         hdr = (struct ice_l4_hdr *)&pkt[offset];
8473                         hdr->dst_port = CPU_TO_BE16(open_port);
8474
8475                         return ICE_SUCCESS;
8476                 }
8477         }
8478
8479         return ICE_ERR_CFG;
8480 }
8481
8482 /**
8483  * ice_find_adv_rule_entry - Search a rule entry
8484  * @hw: pointer to the hardware structure
8485  * @lkups: lookup elements or match criteria for the advanced recipe, one
8486  *         structure per protocol header
8487  * @lkups_cnt: number of protocols
8488  * @recp_id: recipe ID for which we are finding the rule
8489  * @rinfo: other information regarding the rule e.g. priority and action info
8490  *
8491  * Helper function to search for a given advance rule entry
8492  * Returns pointer to entry storing the rule if found
8493  */
8494 static struct ice_adv_fltr_mgmt_list_entry *
8495 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8496                         u16 lkups_cnt, u16 recp_id,
8497                         struct ice_adv_rule_info *rinfo)
8498 {
8499         struct ice_adv_fltr_mgmt_list_entry *list_itr;
8500         struct ice_switch_info *sw = hw->switch_info;
8501         int i;
8502
8503         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8504                             ice_adv_fltr_mgmt_list_entry, list_entry) {
8505                 bool lkups_matched = true;
8506
8507                 if (lkups_cnt != list_itr->lkups_cnt)
8508                         continue;
8509                 for (i = 0; i < list_itr->lkups_cnt; i++)
8510                         if (memcmp(&list_itr->lkups[i], &lkups[i],
8511                                    sizeof(*lkups))) {
8512                                 lkups_matched = false;
8513                                 break;
8514                         }
8515                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8516                     rinfo->tun_type == list_itr->rule_info.tun_type &&
8517                     lkups_matched)
8518                         return list_itr;
8519         }
8520         return NULL;
8521 }
8522
8523 /**
8524  * ice_adv_add_update_vsi_list
8525  * @hw: pointer to the hardware structure
8526  * @m_entry: pointer to current adv filter management list entry
8527  * @cur_fltr: filter information from the book keeping entry
8528  * @new_fltr: filter information with the new VSI to be added
8529  *
8530  * Call AQ command to add or update previously created VSI list with new VSI.
8531  *
8532  * Helper function to do book keeping associated with adding filter information
8533  * The algorithm to do the booking keeping is described below :
8534  * When a VSI needs to subscribe to a given advanced filter
8535  *      if only one VSI has been added till now
8536  *              Allocate a new VSI list and add two VSIs
8537  *              to this list using switch rule command
8538  *              Update the previously created switch rule with the
8539  *              newly created VSI list ID
8540  *      if a VSI list was previously created
8541  *              Add the new VSI to the previously created VSI list set
8542  *              using the update switch rule command
8543  */
8544 static enum ice_status
8545 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8546                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
8547                             struct ice_adv_rule_info *cur_fltr,
8548                             struct ice_adv_rule_info *new_fltr)
8549 {
8550         enum ice_status status;
8551         u16 vsi_list_id = 0;
8552
8553         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8554             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8555             cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8556                 return ICE_ERR_NOT_IMPL;
8557
8558         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8559              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8560             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8561              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8562                 return ICE_ERR_NOT_IMPL;
8563
8564         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8565                  /* Only one entry existed in the mapping and it was not already
8566                   * a part of a VSI list. So, create a VSI list with the old and
8567                   * new VSIs.
8568                   */
8569                 struct ice_fltr_info tmp_fltr;
8570                 u16 vsi_handle_arr[2];
8571
8572                 /* A rule already exists with the new VSI being added */
8573                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8574                     new_fltr->sw_act.fwd_id.hw_vsi_id)
8575                         return ICE_ERR_ALREADY_EXISTS;
8576
8577                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8578                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8579                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8580                                                   &vsi_list_id,
8581                                                   ICE_SW_LKUP_LAST);
8582                 if (status)
8583                         return status;
8584
8585                 ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8586                 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8587                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8588                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8589                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8590                 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8591
8592                 /* Update the previous switch rule of "forward to VSI" to
8593                  * "fwd to VSI list"
8594                  */
8595                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8596                 if (status)
8597                         return status;
8598
8599                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8600                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8601                 m_entry->vsi_list_info =
8602                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8603                                                 vsi_list_id);
8604         } else {
8605                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8606
8607                 if (!m_entry->vsi_list_info)
8608                         return ICE_ERR_CFG;
8609
8610                 /* A rule already exists with the new VSI being added */
8611                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8612                         return ICE_SUCCESS;
8613
8614                 /* Update the previously created VSI list set with
8615                  * the new VSI ID passed in
8616                  */
8617                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8618
8619                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8620                                                   vsi_list_id, false,
8621                                                   ice_aqc_opc_update_sw_rules,
8622                                                   ICE_SW_LKUP_LAST);
8623                 /* update VSI list mapping info with new VSI ID */
8624                 if (!status)
8625                         ice_set_bit(vsi_handle,
8626                                     m_entry->vsi_list_info->vsi_map);
8627         }
8628         if (!status)
8629                 m_entry->vsi_count++;
8630         return status;
8631 }
8632
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	bool prof_rule;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	/* Profile-based rules may legitimately carry no lookup words;
	 * ordinary rules must have at least one.
	 */
	prof_rule = ice_is_prof_rule(rinfo->tun_type);
	if (!prof_rule && !lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match: count every non-zero u16 in
	 * the mask of each lookup element
	 */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	/* The recipe chain can hold at most ICE_MAX_CHAIN_WORDS words */
	if (prof_rule) {
		if (word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	} else {
		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
			return ICE_ERR_PARAM;
	}

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	/* Only these four forwarding actions are supported by this path */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Translate the software VSI handle into the HW VSI number used by
	 * the switch rule; for TX rules the source is the VSI itself.
	 */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* Create (or find) the recipe that matches this lookup set */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	/* No existing rule: build a new switch rule buffer large enough for
	 * the rule header plus the dummy packet.
	 */
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	act |= ICE_SINGLE_ACT_LAN_ENABLE;
	/* Encode the forwarding action into the rule's action word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'RX'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'RX' set the source to be the port number
	 * for 'TX' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			CPU_TO_LE16(hw->port_info->lport);
	} else {
		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

	/* Write the dummy packet (patched per lookup masks) into the rule */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	/* For UDP-encapsulated tunnels, patch the open tunnel port into the
	 * outer UDP header of the dummy packet.
	 */
	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* Program the rule into hardware */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	/* NOTE(review): from here on the rule exists in HW; the error paths
	 * below free host memory but do not remove the HW rule — confirm
	 * whether a rollback (remove_sw_rules) is needed on these failures.
	 */
	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* lkups may be absent for profile rules, hence the prof_rule check */
	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
			   ICE_NONDMA_TO_NONDMA);
	if (!adv_fltr->lkups && !prof_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	/* Record the HW-assigned rule index for later lookup/removal */
	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	LIST_ADD(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* On failure, release the partially-built bookkeeping entry;
	 * ice_free tolerates NULL for the lkups pointer.
	 */
	if (status && adv_fltr) {
		ice_free(hw, adv_fltr->lkups);
		ice_free(hw, adv_fltr);
	}

	ice_free(hw, s_rule);

	return status;
}
8860
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the rule's VSI list. When only one VSI remains
 * afterwards, the rule is converted back from "forward to VSI list" to plain
 * "forward to VSI" and the now-unneeded VSI list is deleted.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	/* NOTE(review): assumes fm_list->vsi_list_info is non-NULL whenever
	 * fltr_act is ICE_FWD_TO_VSI_LIST — confirm callers guarantee this.
	 */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	/* Detach the VSI from the VSI list in HW */
	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the HW change in the bookkeeping bitmap */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Exactly one VSI left: identify it so the rule can forward
		 * to it directly instead of via the list.
		 */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Rewrite the rule's action from VSI list to direct VSI */
		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "forward to VSI list"
		 * to "forward to VSI"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		/* Release the bookkeeping for the deleted VSI list */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
8950
/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: pointer to the rule information for the rule to remove
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	u16 i, rid, vsi_handle;

	/* Rebuild the word-level lookup extraction so the matching recipe
	 * can be located.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the rule list is searched before rule_lock is taken;
	 * confirm callers serialize add/remove, otherwise list_elem could be
	 * freed concurrently between the lookup and the lock below.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule forwards to a single target: remove it outright */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Other VSIs still subscribe; only drop this VSI from the
		 * list and keep the rule.
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		/* No packet header is needed for a removal request */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = (struct ice_aqc_sw_rules_elem *)
			ice_malloc(hw, rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* ICE_ERR_DOES_NOT_EXIST means HW already lost the rule;
		 * the bookkeeping entry is still cleaned up in that case.
		 */
		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
			struct ice_switch_info *sw = hw->switch_info;

			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		ice_free(hw, s_rule);
	}
	return status;
}
9056
9057 /**
9058  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9059  * @hw: pointer to the hardware structure
9060  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9061  *
9062  * This function is used to remove 1 rule at a time. The removal is based on
9063  * the remove_entry parameter. This function will remove rule for a given
9064  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9065  */
9066 enum ice_status
9067 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9068                        struct ice_rule_query_data *remove_entry)
9069 {
9070         struct ice_adv_fltr_mgmt_list_entry *list_itr;
9071         struct LIST_HEAD_TYPE *list_head;
9072         struct ice_adv_rule_info rinfo;
9073         struct ice_switch_info *sw;
9074
9075         sw = hw->switch_info;
9076         if (!sw->recp_list[remove_entry->rid].recp_created)
9077                 return ICE_ERR_PARAM;
9078         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9079         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9080                             list_entry) {
9081                 if (list_itr->rule_info.fltr_rule_id ==
9082                     remove_entry->rule_id) {
9083                         rinfo = list_itr->rule_info;
9084                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9085                         return ice_rem_adv_rule(hw, list_itr->lkups,
9086                                                 list_itr->lkups_cnt, &rinfo);
9087                 }
9088         }
9089         /* either list is empty or unable to find rule */
9090         return ICE_ERR_DOES_NOT_EXIST;
9091 }
9092
9093 /**
9094  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9095  *                       given VSI handle
9096  * @hw: pointer to the hardware structure
9097  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9098  *
9099  * This function is used to remove all the rules for a given VSI and as soon
9100  * as removing a rule fails, it will return immediately with the error code,
9101  * else it will return ICE_SUCCESS
9102  */
9103 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9104 {
9105         struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9106         struct ice_vsi_list_map_info *map_info;
9107         struct LIST_HEAD_TYPE *list_head;
9108         struct ice_adv_rule_info rinfo;
9109         struct ice_switch_info *sw;
9110         enum ice_status status;
9111         u8 rid;
9112
9113         sw = hw->switch_info;
9114         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9115                 if (!sw->recp_list[rid].recp_created)
9116                         continue;
9117                 if (!sw->recp_list[rid].adv_rule)
9118                         continue;
9119
9120                 list_head = &sw->recp_list[rid].filt_rules;
9121                 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9122                                          ice_adv_fltr_mgmt_list_entry,
9123                                          list_entry) {
9124                         rinfo = list_itr->rule_info;
9125
9126                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9127                                 map_info = list_itr->vsi_list_info;
9128                                 if (!map_info)
9129                                         continue;
9130
9131                                 if (!ice_is_bit_set(map_info->vsi_map,
9132                                                     vsi_handle))
9133                                         continue;
9134                         } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9135                                 continue;
9136                         }
9137
9138                         rinfo.sw_act.vsi_handle = vsi_handle;
9139                         status = ice_rem_adv_rule(hw, list_itr->lkups,
9140                                                   list_itr->lkups_cnt, &rinfo);
9141
9142                         if (status)
9143                                 return status;
9144                 }
9145         }
9146         return ICE_SUCCESS;
9147 }
9148
9149 /**
9150  * ice_replay_fltr - Replay all the filters stored by a specific list head
9151  * @hw: pointer to the hardware structure
9152  * @list_head: list for which filters needs to be replayed
9153  * @recp_id: Recipe ID for which rules need to be replayed
9154  */
9155 static enum ice_status
9156 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9157 {
9158         struct ice_fltr_mgmt_list_entry *itr;
9159         enum ice_status status = ICE_SUCCESS;
9160         struct ice_sw_recipe *recp_list;
9161         u8 lport = hw->port_info->lport;
9162         struct LIST_HEAD_TYPE l_head;
9163
9164         if (LIST_EMPTY(list_head))
9165                 return status;
9166
9167         recp_list = &hw->switch_info->recp_list[recp_id];
9168         /* Move entries from the given list_head to a temporary l_head so that
9169          * they can be replayed. Otherwise when trying to re-add the same
9170          * filter, the function will return already exists
9171          */
9172         LIST_REPLACE_INIT(list_head, &l_head);
9173
9174         /* Mark the given list_head empty by reinitializing it so filters
9175          * could be added again by *handler
9176          */
9177         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9178                             list_entry) {
9179                 struct ice_fltr_list_entry f_entry;
9180                 u16 vsi_handle;
9181
9182                 f_entry.fltr_info = itr->fltr_info;
9183                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9184                         status = ice_add_rule_internal(hw, recp_list, lport,
9185                                                        &f_entry);
9186                         if (status != ICE_SUCCESS)
9187                                 goto end;
9188                         continue;
9189                 }
9190
9191                 /* Add a filter per VSI separately */
9192                 ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9193                                      ICE_MAX_VSI) {
9194                         if (!ice_is_vsi_valid(hw, vsi_handle))
9195                                 break;
9196
9197                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9198                         f_entry.fltr_info.vsi_handle = vsi_handle;
9199                         f_entry.fltr_info.fwd_id.hw_vsi_id =
9200                                 ice_get_hw_vsi_num(hw, vsi_handle);
9201                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9202                         if (recp_id == ICE_SW_LKUP_VLAN)
9203                                 status = ice_add_vlan_internal(hw, recp_list,
9204                                                                &f_entry);
9205                         else
9206                                 status = ice_add_rule_internal(hw, recp_list,
9207                                                                lport,
9208                                                                &f_entry);
9209                         if (status != ICE_SUCCESS)
9210                                 goto end;
9211                 }
9212         }
9213 end:
9214         /* Clear the filter management list */
9215         ice_rem_sw_rule_info(hw, &l_head);
9216         return status;
9217 }
9218
9219 /**
9220  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9221  * @hw: pointer to the hardware structure
9222  *
9223  * NOTE: This function does not clean up partially added filters on error.
9224  * It is up to caller of the function to issue a reset or fail early.
9225  */
9226 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9227 {
9228         struct ice_switch_info *sw = hw->switch_info;
9229         enum ice_status status = ICE_SUCCESS;
9230         u8 i;
9231
9232         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9233                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9234
9235                 status = ice_replay_fltr(hw, i, head);
9236                 if (status != ICE_SUCCESS)
9237                         return status;
9238         }
9239         return status;
9240 }
9241
9242 /**
9243  * ice_replay_vsi_fltr - Replay filters for requested VSI
9244  * @hw: pointer to the hardware structure
9245  * @pi: pointer to port information structure
9246  * @sw: pointer to switch info struct for which function replays filters
9247  * @vsi_handle: driver VSI handle
9248  * @recp_id: Recipe ID for which rules need to be replayed
9249  * @list_head: list for which filters need to be replayed
9250  *
9251  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9252  * It is required to pass valid VSI handle.
9253  */
9254 static enum ice_status
9255 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9256                     struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9257                     struct LIST_HEAD_TYPE *list_head)
9258 {
9259         struct ice_fltr_mgmt_list_entry *itr;
9260         enum ice_status status = ICE_SUCCESS;
9261         struct ice_sw_recipe *recp_list;
9262         u16 hw_vsi_id;
9263
9264         if (LIST_EMPTY(list_head))
9265                 return status;
9266         recp_list = &sw->recp_list[recp_id];
9267         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9268
9269         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9270                             list_entry) {
9271                 struct ice_fltr_list_entry f_entry;
9272
9273                 f_entry.fltr_info = itr->fltr_info;
9274                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9275                     itr->fltr_info.vsi_handle == vsi_handle) {
9276                         /* update the src in case it is VSI num */
9277                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9278                                 f_entry.fltr_info.src = hw_vsi_id;
9279                         status = ice_add_rule_internal(hw, recp_list,
9280                                                        pi->lport,
9281                                                        &f_entry);
9282                         if (status != ICE_SUCCESS)
9283                                 goto end;
9284                         continue;
9285                 }
9286                 if (!itr->vsi_list_info ||
9287                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9288                         continue;
9289                 /* Clearing it so that the logic can add it back */
9290                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9291                 f_entry.fltr_info.vsi_handle = vsi_handle;
9292                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9293                 /* update the src in case it is VSI num */
9294                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9295                         f_entry.fltr_info.src = hw_vsi_id;
9296                 if (recp_id == ICE_SW_LKUP_VLAN)
9297                         status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9298                 else
9299                         status = ice_add_rule_internal(hw, recp_list,
9300                                                        pi->lport,
9301                                                        &f_entry);
9302                 if (status != ICE_SUCCESS)
9303                         goto end;
9304         }
9305 end:
9306         return status;
9307 }
9308
9309 /**
9310  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9311  * @hw: pointer to the hardware structure
9312  * @vsi_handle: driver VSI handle
9313  * @list_head: list for which filters need to be replayed
9314  *
9315  * Replay the advanced rule for the given VSI.
9316  */
9317 static enum ice_status
9318 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9319                         struct LIST_HEAD_TYPE *list_head)
9320 {
9321         struct ice_rule_query_data added_entry = { 0 };
9322         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9323         enum ice_status status = ICE_SUCCESS;
9324
9325         if (LIST_EMPTY(list_head))
9326                 return status;
9327         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9328                             list_entry) {
9329                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9330                 u16 lk_cnt = adv_fltr->lkups_cnt;
9331
9332                 if (vsi_handle != rinfo->sw_act.vsi_handle)
9333                         continue;
9334                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9335                                           &added_entry);
9336                 if (status)
9337                         break;
9338         }
9339         return status;
9340 }
9341
9342 /**
9343  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9344  * @hw: pointer to the hardware structure
9345  * @pi: pointer to port information structure
9346  * @vsi_handle: driver VSI handle
9347  *
9348  * Replays filters for requested VSI via vsi_handle.
9349  */
9350 enum ice_status
9351 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9352                         u16 vsi_handle)
9353 {
9354         struct ice_switch_info *sw = hw->switch_info;
9355         enum ice_status status;
9356         u8 i;
9357
9358         /* Update the recipes that were created */
9359         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9360                 struct LIST_HEAD_TYPE *head;
9361
9362                 head = &sw->recp_list[i].filt_replay_rules;
9363                 if (!sw->recp_list[i].adv_rule)
9364                         status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9365                                                      head);
9366                 else
9367                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9368                 if (status != ICE_SUCCESS)
9369                         return status;
9370         }
9371
9372         return ICE_SUCCESS;
9373 }
9374
9375 /**
9376  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9377  * @hw: pointer to the HW struct
9378  * @sw: pointer to switch info struct for which function removes filters
9379  *
9380  * Deletes the filter replay rules for given switch
9381  */
9382 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9383 {
9384         u8 i;
9385
9386         if (!sw)
9387                 return;
9388
9389         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9390                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9391                         struct LIST_HEAD_TYPE *l_head;
9392
9393                         l_head = &sw->recp_list[i].filt_replay_rules;
9394                         if (!sw->recp_list[i].adv_rule)
9395                                 ice_rem_sw_rule_info(hw, l_head);
9396                         else
9397                                 ice_rem_adv_rule_info(hw, l_head);
9398                 }
9399         }
9400 }
9401
/**
 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
 * @hw: pointer to the HW struct
 *
 * Deletes the filter replay rules. Convenience wrapper that removes the
 * replay rules for the switch info owned by @hw.
 */
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
	ice_rm_sw_replay_rule_info(hw, hw->switch_info);
}